diff --git a/go.mod b/go.mod index 5265e6651c8..8b2d2ae3bf8 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.138.0 github.com/opentracing-contrib/go-grpc v0.1.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pierrec/lz4/v4 v4.1.23 // indirect + github.com/pierrec/lz4/v4 v4.1.25 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 @@ -48,11 +48,11 @@ require ( github.com/stretchr/testify v1.11.1 github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/willf/bloom v2.0.3+incompatible - go.opentelemetry.io/collector v0.138.0 // indirect - go.opentelemetry.io/collector/component v1.44.0 - go.opentelemetry.io/collector/confmap v1.44.0 - go.opentelemetry.io/collector/consumer v1.44.0 - go.opentelemetry.io/collector/pdata v1.44.0 + go.opentelemetry.io/collector v0.146.0 // indirect + go.opentelemetry.io/collector/component v1.51.0 + go.opentelemetry.io/collector/confmap v1.51.0 + go.opentelemetry.io/collector/consumer v1.51.0 + go.opentelemetry.io/collector/pdata v1.51.0 go.opentelemetry.io/collector/semconv v0.128.1-0.20250610090210-188191247685 // indirect go.opentelemetry.io/otel v1.40.0 go.opentelemetry.io/otel/metric v1.40.0 @@ -99,21 +99,21 @@ require ( github.com/twmb/franz-go/pkg/kmsg v1.12.0 github.com/twmb/franz-go/plugin/kotel v1.6.0 github.com/twmb/franz-go/plugin/kprom v1.2.1 - go.opentelemetry.io/collector/client v1.44.0 - go.opentelemetry.io/collector/component/componenttest v0.138.0 - go.opentelemetry.io/collector/config/configgrpc v0.138.0 - go.opentelemetry.io/collector/config/confighttp v0.138.0 - go.opentelemetry.io/collector/config/configopaque v1.44.0 - go.opentelemetry.io/collector/config/configtls v1.44.0 - go.opentelemetry.io/collector/exporter v1.44.0 - go.opentelemetry.io/collector/exporter/exportertest v0.138.0 - go.opentelemetry.io/collector/exporter/otlpexporter v0.138.0 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.138.0 - go.opentelemetry.io/collector/otelcol v0.138.0 - go.opentelemetry.io/collector/pdata/testdata v0.138.0 - go.opentelemetry.io/collector/processor v1.44.0 - go.opentelemetry.io/collector/receiver v1.44.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.138.0 + go.opentelemetry.io/collector/client v1.51.0 + go.opentelemetry.io/collector/component/componenttest v0.146.0 + go.opentelemetry.io/collector/config/configgrpc v0.146.0 + go.opentelemetry.io/collector/config/confighttp v0.146.0 + go.opentelemetry.io/collector/config/configopaque v1.51.0 + go.opentelemetry.io/collector/config/configtls v1.51.0 + go.opentelemetry.io/collector/exporter v1.51.0 + go.opentelemetry.io/collector/exporter/exportertest v0.146.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.146.0 + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.146.0 + go.opentelemetry.io/collector/otelcol v0.146.0 + go.opentelemetry.io/collector/pdata/testdata v0.146.0 + go.opentelemetry.io/collector/processor v1.51.0 + go.opentelemetry.io/collector/receiver v1.51.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.146.0 go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 @@ -183,7 +183,7 @@ require ( github.com/eapache/go-resiliency v1.7.0 // indirect 
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/ebitengine/purego v0.9.0 // indirect + github.com/ebitengine/purego v0.9.1 // indirect github.com/edsrzf/mmap-go v1.2.0 // indirect github.com/elastic/go-grok v0.3.1 // indirect github.com/elastic/lunes v0.1.0 // indirect @@ -192,7 +192,7 @@ require ( github.com/expr-lang/expr v1.17.7 // indirect github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/foxboron/go-tpm-keyfiles v0.0.0-20250903184740-5d135037bd4d // indirect + github.com/foxboron/go-tpm-keyfiles v0.0.0-20251226215517-609e4778396f // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect @@ -208,13 +208,13 @@ require ( github.com/go-openapi/strfmt v0.24.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.24.0 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/go-tpm v0.9.6 // indirect + github.com/google/go-tpm v0.9.8 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect github.com/grafana/otel-profiling-go v0.5.1 // indirect @@ -231,7 +231,7 @@ require ( github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-sockaddr v1.0.7 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/go-version v1.8.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/memberlist v0.5.2 // indirect github.com/hashicorp/serf v0.10.2 // indirect @@ -250,9 +250,9 @@ require ( github.com/klauspost/cpuid/v2 v2.2.11 // indirect github.com/klauspost/crc32 v1.3.0 // indirect github.com/knadh/koanf v1.5.0 // indirect - github.com/knadh/koanf/v2 v2.3.0 // indirect + github.com/knadh/koanf/v2 v2.3.2 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect + github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect github.com/magefile/mage v1.15.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.14 // indirect @@ -311,19 +311,19 @@ require ( github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver/v6 v6.0.0 // indirect - github.com/shirou/gopsutil/v4 v4.25.9 // indirect + github.com/shirou/gopsutil/v4 v4.26.1 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/tinylib/msgp v1.6.1 // indirect - github.com/tklauser/go-sysconf v0.3.15 // indirect - 
github.com/tklauser/numcpus v0.10.0 // indirect + github.com/tklauser/go-sysconf v0.3.16 // indirect + github.com/tklauser/numcpus v0.11.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/twmb/franz-go/pkg/sasl/kerberos v1.1.0 // indirect github.com/twmb/franz-go/plugin/kzap v1.1.2 // indirect @@ -345,54 +345,53 @@ require ( go.etcd.io/etcd/client/v3 v3.5.12 // indirect go.mongodb.org/mongo-driver v1.17.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/collector/component/componentstatus v0.138.0 // indirect - go.opentelemetry.io/collector/config/configauth v1.44.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.44.0 // indirect - go.opentelemetry.io/collector/config/configmiddleware v1.44.0 // indirect - go.opentelemetry.io/collector/config/confignet v1.44.0 // indirect - go.opentelemetry.io/collector/config/configoptional v1.44.0 // indirect - go.opentelemetry.io/collector/config/configretry v1.44.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.138.0 // indirect - go.opentelemetry.io/collector/confmap/xconfmap v0.138.0 // indirect - go.opentelemetry.io/collector/connector v0.138.0 // indirect - go.opentelemetry.io/collector/connector/connectortest v0.138.0 // indirect - go.opentelemetry.io/collector/connector/xconnector v0.138.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.138.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.138.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.138.0 // indirect - go.opentelemetry.io/collector/consumer/xconsumer v0.138.0 // indirect - go.opentelemetry.io/collector/exporter/exporterhelper v0.138.0 // indirect - go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.138.0 // indirect - go.opentelemetry.io/collector/exporter/xexporter v0.138.0 // indirect - go.opentelemetry.io/collector/extension v1.44.0 // indirect - go.opentelemetry.io/collector/extension/extensionauth v1.44.0 // indirect - go.opentelemetry.io/collector/extension/extensioncapabilities v0.138.0 // indirect - go.opentelemetry.io/collector/extension/extensionmiddleware v0.138.0 // indirect - go.opentelemetry.io/collector/extension/extensiontest v0.138.0 // indirect - go.opentelemetry.io/collector/extension/xextension v0.138.0 // indirect - go.opentelemetry.io/collector/featuregate v1.44.0 // indirect - go.opentelemetry.io/collector/internal/fanoutconsumer v0.138.0 // indirect - go.opentelemetry.io/collector/internal/sharedcomponent v0.138.0 // indirect - go.opentelemetry.io/collector/internal/telemetry v0.138.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.138.0 // indirect - go.opentelemetry.io/collector/pdata/xpdata v0.138.0 // indirect - go.opentelemetry.io/collector/pipeline v1.44.0 // indirect - go.opentelemetry.io/collector/pipeline/xpipeline v0.138.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.146.0 // indirect + go.opentelemetry.io/collector/config/configauth v1.51.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.51.0 // indirect + go.opentelemetry.io/collector/config/configmiddleware v1.51.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.51.0 // indirect + go.opentelemetry.io/collector/config/configoptional v1.51.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.51.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.146.0 // indirect + 
go.opentelemetry.io/collector/confmap/xconfmap v0.146.0 // indirect + go.opentelemetry.io/collector/connector v0.146.0 // indirect + go.opentelemetry.io/collector/connector/connectortest v0.146.0 // indirect + go.opentelemetry.io/collector/connector/xconnector v0.146.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.146.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.146.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.146.0 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.146.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper v0.146.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.146.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.146.0 // indirect + go.opentelemetry.io/collector/extension v1.51.0 // indirect + go.opentelemetry.io/collector/extension/extensionauth v1.51.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.146.0 // indirect + go.opentelemetry.io/collector/extension/extensionmiddleware v0.146.0 // indirect + go.opentelemetry.io/collector/extension/extensiontest v0.146.0 // indirect + go.opentelemetry.io/collector/extension/xextension v0.146.0 // indirect + go.opentelemetry.io/collector/featuregate v1.51.0 // indirect + go.opentelemetry.io/collector/internal/componentalias v0.146.0 // indirect + go.opentelemetry.io/collector/internal/fanoutconsumer v0.146.0 // indirect + go.opentelemetry.io/collector/internal/sharedcomponent v0.146.0 // indirect + go.opentelemetry.io/collector/internal/telemetry v0.146.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.146.0 // indirect + go.opentelemetry.io/collector/pdata/xpdata v0.146.0 // indirect + go.opentelemetry.io/collector/pipeline v1.51.0 // indirect + go.opentelemetry.io/collector/pipeline/xpipeline v0.146.0 // indirect go.opentelemetry.io/collector/processor/processorhelper v0.138.0 // indirect go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.138.0 // indirect - go.opentelemetry.io/collector/processor/processortest v0.138.0 // indirect - go.opentelemetry.io/collector/processor/xprocessor v0.138.0 // indirect - go.opentelemetry.io/collector/receiver/receiverhelper v0.138.0 // indirect - go.opentelemetry.io/collector/receiver/receivertest v0.138.0 // indirect - go.opentelemetry.io/collector/receiver/xreceiver v0.138.0 // indirect - go.opentelemetry.io/collector/service v0.138.0 // indirect - go.opentelemetry.io/collector/service/hostcapabilities v0.138.0 // indirect - go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 // indirect + go.opentelemetry.io/collector/processor/processortest v0.146.0 // indirect + go.opentelemetry.io/collector/processor/xprocessor v0.146.0 // indirect + go.opentelemetry.io/collector/receiver/receiverhelper v0.146.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.146.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.146.0 // indirect + go.opentelemetry.io/collector/service v0.146.0 // indirect + go.opentelemetry.io/collector/service/hostcapabilities v0.146.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 // indirect go.opentelemetry.io/contrib/otelconf v0.18.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.38.0 // indirect 
go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 // indirect go.opentelemetry.io/contrib/samplers/jaegerremote v0.31.0 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect @@ -405,7 +404,7 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect - go.opentelemetry.io/otel/log v0.14.0 // indirect + go.opentelemetry.io/otel/log v0.16.0 // indirect go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect @@ -417,7 +416,7 @@ require ( golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.34.0 // indirect golang.org/x/tools v0.41.0 // indirect - gonum.org/v1/gonum v0.16.0 // indirect + gonum.org/v1/gonum v0.17.0 // indirect google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 4566aebc722..24d479aa284 100644 --- a/go.sum +++ b/go.sum @@ -235,8 +235,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4A github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k= -github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A= +github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b h1:ZHiD4/yE4idlbqvAO6iYCOYRzOMRpxkW+FKasRA3tsQ= @@ -277,10 +277,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/foxboron/go-tpm-keyfiles v0.0.0-20250903184740-5d135037bd4d h1:EdO/NMMuCZfxhdzTZLuKAciQSnI2DV+Ppg8+vAYrnqA= -github.com/foxboron/go-tpm-keyfiles v0.0.0-20250903184740-5d135037bd4d/go.mod h1:uAyTlAUxchYuiFjTHmuIEJ4nGSm7iOPaGcAyA81fJ80= -github.com/foxboron/swtpm_test v0.0.0-20230726224112-46aaafdf7006 h1:50sW4r0PcvlpG4PV8tYh2RVCapszJgaOLRCS2subvV4= -github.com/foxboron/swtpm_test v0.0.0-20230726224112-46aaafdf7006/go.mod h1:eIXCMsMYCaqq9m1KSSxXwQG11krpuNPGP3k0uaWrbas= +github.com/foxboron/go-tpm-keyfiles v0.0.0-20251226215517-609e4778396f h1:RJ+BDPLSHQO7cSjKBqjPJSbi1qfk9WcsjQDtZiw3dZw= +github.com/foxboron/go-tpm-keyfiles v0.0.0-20251226215517-609e4778396f/go.mod h1:VHbbch/X4roIY22jL1s3qRbZhCiRIgUAF/PdSUcx2io= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -339,8 +337,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -404,10 +402,10 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/go-tpm v0.9.6 h1:Ku42PT4LmjDu1H5C5ISWLlpI1mj+Zq7sPGKoRw2XROA= -github.com/google/go-tpm v0.9.6/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= -github.com/google/go-tpm-tools v0.4.4 h1:oiQfAIkc6xTy9Fl5NKTeTJkBTlXdHsxAofmQyxBKY98= -github.com/google/go-tpm-tools v0.4.4/go.mod h1:T8jXkp2s+eltnCDIsXR84/MTcVU9Ja7bh3Mit0pa4AY= +github.com/google/go-tpm v0.9.8 h1:slArAR9Ft+1ybZu0lBwpSmpwhRXaa85hWtMinMyRAWo= +github.com/google/go-tpm v0.9.8/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= +github.com/google/go-tpm-tools v0.4.7 h1:J3ycC8umYxM9A4eF73EofRZu4BxY0jjQnUnkhIBbvws= +github.com/google/go-tpm-tools v0.4.7/go.mod h1:gSyXTZHe3fgbzb6WEGd90QucmsnT1SRdlye82gH8QjQ= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= @@ -500,8 +498,8 @@ github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -586,8 +584,8 @@ github.com/klauspost/crc32 v1.3.0 
h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= -github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM= -github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= +github.com/knadh/koanf/v2 v2.3.2 h1:Ee6tuzQYFwcZXQpc2MiVeC6qHMandf5SMUJJNoFp/c4= +github.com/knadh/koanf/v2 v2.3.2/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -606,8 +604,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/linode/linodego v1.59.0 h1:kYz6sQH9g0u21gbI1UUFjZmFLirtc39JPybygrW76Q0= github.com/linode/linodego v1.59.0/go.mod h1:1+Bt0oTz5rBnDOJbGhccxn7LYVytXTIIfAy7QYmijDs= -github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= -github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k= +github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -776,8 +774,8 @@ github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.23 h1:oJE7T90aYBGtFNrI8+KbETnPymobAhzRrR8Mu8n1yfU= -github.com/pierrec/lz4/v4 v4.1.23/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= +github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0= +github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -860,8 +858,8 @@ github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtr github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/sercand/kuberesolver/v6 v6.0.0 h1:ScvS2Ga9snVkpOahln/BCLySr3/iBAHJf25u66DweZ0= github.com/sercand/kuberesolver/v6 v6.0.0/go.mod h1:Dxkqms3OJadP5zirIBPLi9FV8Qpys3T3w40XPEcVsu0= -github.com/shirou/gopsutil/v4 v4.25.9 h1:JImNpf6gCVhKgZhtaAHJ0serfFGtlfIlSC08eaKdTrU= -github.com/shirou/gopsutil/v4 
v4.25.9/go.mod h1:gxIxoC+7nQRwUl/xNhutXlD8lq+jxTgpIkEf3rADHL8= +github.com/shirou/gopsutil/v4 v4.26.1 h1:TOkEyriIXk2HX9d4isZJtbjXbEjf5qyKPAzbzY0JWSo= +github.com/shirou/gopsutil/v4 v4.26.1/go.mod h1:medLI9/UNAb0dOI9Q3/7yWSqKkj00u+1tgY8nvv41pc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -875,8 +873,8 @@ github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= @@ -915,10 +913,10 @@ github.com/thanos-io/objstore v0.0.0-20220809103346-8ef1f215e2bf/go.mod h1:v0Nhu github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY= github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA= -github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= -github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= -github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= -github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= +github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= +github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI= +github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw= +github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= @@ -995,144 +993,146 @@ go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFX go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/collector v0.138.0 h1:nIlyGQUoDlvtJENVuzOcYF8/zO8jTL1Lh8CxGNMo/yM= -go.opentelemetry.io/collector v0.138.0/go.mod h1:ZQYYPMuh4cm/E1L1pG6h5lJeH+qSCOFAPKzRQfjeGwQ= -go.opentelemetry.io/collector/client v1.44.0 h1:pfOlUf6pU/1MyucE7oC1Q/aZAxQS8icKA/iw2foHqPE= -go.opentelemetry.io/collector/client v1.44.0/go.mod 
h1:GoESF6Tpa5ikkYGFvctqgILCpBuG+F45HPznER6lPwk= -go.opentelemetry.io/collector/component v1.44.0 h1:SX5UO/gSDm+1zyvHVRFgpf8J1WP6U3y/SLUXiVEghbE= -go.opentelemetry.io/collector/component v1.44.0/go.mod h1:geKbCTNoQfu55tOPiDuxLzNZsoO9//HRRg10/8WusWk= -go.opentelemetry.io/collector/component/componentstatus v0.138.0 h1:KUZyp1b6W2UUb/m/IhakL4bBdX6cbBj0PPx7MZ/jtOo= -go.opentelemetry.io/collector/component/componentstatus v0.138.0/go.mod h1:IztgkWj4VDSb3afV5ZHutS3vpuVhGbueAzOKrCJ4/V8= -go.opentelemetry.io/collector/component/componenttest v0.138.0 h1:7a8whPDFu80uPk73iqeMdhYDVxl4oZEsuaBYb2ysXTc= -go.opentelemetry.io/collector/component/componenttest v0.138.0/go.mod h1:ODaEuyS6BrCnTVHCsLSRUtNklT3gnAIq0txYAAI2PKM= -go.opentelemetry.io/collector/config/configauth v1.44.0 h1:zYur6VJyHFtJW/1MSKyRaMO6+tsV12kCJot/kSkrpW4= -go.opentelemetry.io/collector/config/configauth v1.44.0/go.mod h1:8arPf8HFVkhKabgDsKqTggm081s71IYF8LogcGlHUeY= -go.opentelemetry.io/collector/config/configcompression v1.44.0 h1:AaNpVYWFrmWKGnZdJCuVSlY3STSm0UBTuZU13aavvlQ= -go.opentelemetry.io/collector/config/configcompression v1.44.0/go.mod h1:ZlnKaXFYL3HVMUNWVAo/YOLYoxNZo7h8SrQp3l7GV00= -go.opentelemetry.io/collector/config/configgrpc v0.138.0 h1:kY0vTvurV0PkeaJG/otkBrMNk6RGJk9n8s+5PpZJcGg= -go.opentelemetry.io/collector/config/configgrpc v0.138.0/go.mod h1:xOQCBmGksJxU/OUr28jxVTttS3x6Nc1IgkcbJU9MOoI= -go.opentelemetry.io/collector/config/confighttp v0.138.0 h1:6NaoRNwwS+Hci8XC+oxGH2njZTw/hm3Bv66TsvpBip8= -go.opentelemetry.io/collector/config/confighttp v0.138.0/go.mod h1:0NKEeugQ7zQ/q6REMqxNPOrkYH8LdpUm6e9OlzMbfZg= -go.opentelemetry.io/collector/config/configmiddleware v1.44.0 h1:lXIF5YMZi9hmyInvmGimmKKMtukSJP4CfvyKaLyIbUg= -go.opentelemetry.io/collector/config/configmiddleware v1.44.0/go.mod h1:7f+1+cmt4spFY3Gs14XB/04RSsDYG7ycTzvNJbeayPY= -go.opentelemetry.io/collector/config/confignet v1.44.0 h1:2bjbOxUz4z1XHSGF6UJxygdxdpG2vPf+SOh2UDww7zQ= -go.opentelemetry.io/collector/config/confignet v1.44.0/go.mod h1:4jJWdoe1MmpqxMzxrIILcS5FK2JPocXYZGUvv5ZQVKE= -go.opentelemetry.io/collector/config/configopaque v1.44.0 h1:bfpNfe42k7SEREJZ2l3jI0EKjCUqKslvlY3o4OGYhGg= -go.opentelemetry.io/collector/config/configopaque v1.44.0/go.mod h1:9uzLyGsWX0FtPWkomQXqLtblmSHgJFaM4T0gMBrCma0= -go.opentelemetry.io/collector/config/configoptional v1.44.0 h1:Jaq8V5JBVsdKQ275QkBuCYUMmZnlNMoCFatryRius2I= -go.opentelemetry.io/collector/config/configoptional v1.44.0/go.mod h1:AGi2klVapjAEHVPrBVdq+3dW9l3wfA2MLH9qn5Q8nSg= -go.opentelemetry.io/collector/config/configretry v1.44.0 h1:2EVcm1trnXhXaLQ2kFdLSnC6sg4a0t20nf78C2RJUd0= -go.opentelemetry.io/collector/config/configretry v1.44.0/go.mod h1:ZSTYqAJCq4qf+/4DGoIxCElDIl5yHt8XxEbcnpWBbMM= -go.opentelemetry.io/collector/config/configtelemetry v0.138.0 h1:biiZj+zecBttCcEKGmEF/wdWtPkKXm4YreN6ziF5xjg= -go.opentelemetry.io/collector/config/configtelemetry v0.138.0/go.mod h1:Xjw2+DpNLjYtx596EHSWBy0dNQRiJ2H+BlWU907lO40= -go.opentelemetry.io/collector/config/configtls v1.44.0 h1:UkFXToC6Y4p1S2a/ag5FkfRLZNxL24k3my0Tif/w2gY= -go.opentelemetry.io/collector/config/configtls v1.44.0/go.mod h1:wsOaG0LRnZjhRXpl0epNxba2HJzfZwmnKdu6NO7l7pw= -go.opentelemetry.io/collector/confmap v1.44.0 h1:CIK4jAk6H3KTKza4nvWQkqLqrudLkYGz3evu5163uxg= -go.opentelemetry.io/collector/confmap v1.44.0/go.mod h1:w37Xiu/PK3nTdqKb7YEvQECHYkuW7QnmdS7b9iRjOGo= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.44.0 h1:O0zEtfu8ReLeJcgDoWQuwOX0vStbRK6Kd9LUEvLhnJc= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.44.0/go.mod 
h1:dmx4+x1eZO85/CNViUhKs4eZAMy9q+TD3EBpR1o8fv8= -go.opentelemetry.io/collector/confmap/xconfmap v0.138.0 h1:0b/h3LXBAcHFKPE9eVjZ4KRTaj9ImdOBK2z9hBlmoyA= -go.opentelemetry.io/collector/confmap/xconfmap v0.138.0/go.mod h1:rk8hjMqoHX2KYUjGUPaiWo3qapj4o8UpQWWsdEqvorg= -go.opentelemetry.io/collector/connector v0.138.0 h1:IXYUH4jKtN86hJQmBCokpV+ZZwmmcW/qMyYeUFdKPew= -go.opentelemetry.io/collector/connector v0.138.0/go.mod h1:8vxTX+CoVZUn5H/COI+ZG/GcOB9B3pbsp94JvQBJGcE= -go.opentelemetry.io/collector/connector/connectortest v0.138.0 h1:fGEjDwEAwQd+TVICLW7wwQBQJ+lzDxkSQmkzumATP6k= -go.opentelemetry.io/collector/connector/connectortest v0.138.0/go.mod h1:+yPunb1zGzami8iHEFqlJI8GNRKN+wrgAYuI99LTKsw= -go.opentelemetry.io/collector/connector/xconnector v0.138.0 h1:omPoMK6PsxuTrxzvVk/SY76kW4nLFTPE/H8jtPa7M9w= -go.opentelemetry.io/collector/connector/xconnector v0.138.0/go.mod h1:NllJAPjA9yxKQOhLxgo0men45ncbqHymvkv1OGmxaZw= -go.opentelemetry.io/collector/consumer v1.44.0 h1:vkKJTfQYBQNuKas0P1zv1zxJjHvmMa/n7d6GiSHT0aw= -go.opentelemetry.io/collector/consumer v1.44.0/go.mod h1:t6u5+0FBUtyZLVFhVPgFabd4Iph7rP+b9VkxaY8dqXU= -go.opentelemetry.io/collector/consumer/consumererror v0.138.0 h1:UfdATL2xDBSUORs9ihlIEdsY6CTIKCnIOCjt0NCwzwg= -go.opentelemetry.io/collector/consumer/consumererror v0.138.0/go.mod h1:nkPNEi12ObrdScg48gCTB/64zydtRsDxktzM7knXUPY= -go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.138.0 h1:JnbdxkpldBUOgzwy1gKnWU3yEzHsTWSWsIajYsR8peI= -go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.138.0/go.mod h1:560LrhVUuMwVCeDPOG6IBylxj3mLgjawjNNn0PtNhnU= -go.opentelemetry.io/collector/consumer/consumertest v0.138.0 h1:1PwWhjQ3msYhcml/YeeSegjUAVC4nlA8+LY5uKqJbHk= -go.opentelemetry.io/collector/consumer/consumertest v0.138.0/go.mod h1:2XBKvZKVcF/7ts1Y+PxTgrQiBhXAnzMfT+1VKtzoDpQ= -go.opentelemetry.io/collector/consumer/xconsumer v0.138.0 h1:peQ59TyBmt30lv4YH8gfBbTSJPuPIZW0kpFTfk45rVk= -go.opentelemetry.io/collector/consumer/xconsumer v0.138.0/go.mod h1:ivpzDlwQowx8RTOZBPa281/4NvNBvhabm7JmeAbsGIU= -go.opentelemetry.io/collector/exporter v1.44.0 h1:d6aDF8acZbJBT/S6MtGNSRPammZbBs5t+31BOw6vVtQ= -go.opentelemetry.io/collector/exporter v1.44.0/go.mod h1:2cn4CQt+tTNtK2buESGtgw+h1L8KHOShIBdSmiUMiwo= -go.opentelemetry.io/collector/exporter/exporterhelper v0.138.0 h1:Bh46gUfxeynQ4V+drddzI5srEpDKt+y1wea25fzVGfk= -go.opentelemetry.io/collector/exporter/exporterhelper v0.138.0/go.mod h1:m1Vi/iSWyXEqZY/k09imDYQ4435eX7Hvm1GPT0HklfI= -go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.138.0 h1:NkZ7Q7EkT0W9Xwro9/2M5NiWB8FlhzgPNjWoHad6To0= -go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.138.0/go.mod h1:lnOVUUMOVa9Qcl18TLTsV2fLFUepsKY95AJ2/kfgmio= -go.opentelemetry.io/collector/exporter/exportertest v0.138.0 h1:vaMXeAVqZ4Jk1XrF0F2f+3psiGnIN4KOdC6O0mAeY2g= -go.opentelemetry.io/collector/exporter/exportertest v0.138.0/go.mod h1:yi6DUe4S4hfH3TYq1PbaFrnq/oeQ/DQ98zdJlCu5ZGo= -go.opentelemetry.io/collector/exporter/otlpexporter v0.138.0 h1:g4Q1WX57cOwtGviPM9rsjIiYTsi3GxGgL3P4HeUN9I0= -go.opentelemetry.io/collector/exporter/otlpexporter v0.138.0/go.mod h1:5SCy2nm8VpWhPsWqKEzFc9PdOXMcL+7BQYhDoqZ9U74= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.138.0 h1:qkZN1ASMlQVV87rkIjngBXoG9khaqXfynxotkLqqu6Q= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.138.0/go.mod h1:BUMGEMWulAHfhqFGOb3nrwCWWPXD07CY8vr+drJb47Y= -go.opentelemetry.io/collector/exporter/xexporter v0.138.0 h1:J0+pTIrVL2g3NCVsYI+nKtRQfYZRzi+GvRwif4Ugs20= 
-go.opentelemetry.io/collector/exporter/xexporter v0.138.0/go.mod h1:2XwR51JEJdP5nc10mL682FV/YuFght57KbsOLbNmdE8= -go.opentelemetry.io/collector/extension v1.44.0 h1:MYoeNxhHayogTfkTvOKa+FbAxkrivLI6ka3ibkqi+RQ= -go.opentelemetry.io/collector/extension v1.44.0/go.mod h1:Lr6V2Y5bF9hLLbahKl0Y3T0vQmOBJX+u/W0iZ0xa/LM= -go.opentelemetry.io/collector/extension/extensionauth v1.44.0 h1:30JTv1rjRE+2R3wV8tA/ENz013il5IsKeyGFHTHG8U0= -go.opentelemetry.io/collector/extension/extensionauth v1.44.0/go.mod h1:6Sh0hqPfPqpg0ErCoNPO/ky2NdfGmUX+G5wekPx7A7U= -go.opentelemetry.io/collector/extension/extensionauth/extensionauthtest v0.138.0 h1:ESiON4jDR8dhU4vPj11GcYPT+KFWgc1YnEKqS5Sc/us= -go.opentelemetry.io/collector/extension/extensionauth/extensionauthtest v0.138.0/go.mod h1:w0c7bgP2FiyZlFPbIIkfn8yqQW1cqGY2DXaaT8oscIA= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.138.0 h1:p3Xlbr3U3HqYZBlq2x5xxivj2KpqiwS9tgAZMm69pyc= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.138.0/go.mod h1:kN+Y8aXlnjoXRXEl/9dVusU+6u0CXm3YjeivtMJcC+8= -go.opentelemetry.io/collector/extension/extensionmiddleware v0.138.0 h1:e80GXYoQ5HpZS+2TLtigPhi8IWNeYB/8s1LXP2fiWCk= -go.opentelemetry.io/collector/extension/extensionmiddleware v0.138.0/go.mod h1:/ub63cgY3YraiJJ3pBuxDnxEzeEXqniuRDQYf6NIBDE= -go.opentelemetry.io/collector/extension/extensionmiddleware/extensionmiddlewaretest v0.138.0 h1:A574ECis4EzO5Yq+u4lUfZDXiYrSco4A0XtOte6DCvY= -go.opentelemetry.io/collector/extension/extensionmiddleware/extensionmiddlewaretest v0.138.0/go.mod h1:sx6H9WWy0IyXmeR1ZRSlFA8WCNATtmPUCb5C1+2XdVw= -go.opentelemetry.io/collector/extension/extensiontest v0.138.0 h1:YyMrXjGleqfwXcFIxfdHP8F8QFQDSyoZiEji4LEteDA= -go.opentelemetry.io/collector/extension/extensiontest v0.138.0/go.mod h1:pNvTqjxoJQQzT/qgjZEA46PBQDiyb3PZ3vATpCSfOE4= -go.opentelemetry.io/collector/extension/xextension v0.138.0 h1:dBjdmdauSZiYVuOBKythzus+eDPUi1y0m0iVQHB8bAY= -go.opentelemetry.io/collector/extension/xextension v0.138.0/go.mod h1:cdIt9OvY1pHihByNAvnEZH8ggGaSmrHCwVNwRAWVxY8= -go.opentelemetry.io/collector/extension/zpagesextension v0.138.0 h1:1swzU4qtkabuMbz1cLAKTAFC9pkdrcmtfyJNVtH2fK0= -go.opentelemetry.io/collector/extension/zpagesextension v0.138.0/go.mod h1:40fHxLsqsimhZRgreCzigKpQLhg8LPv/NUy1ytaiWIk= -go.opentelemetry.io/collector/featuregate v1.44.0 h1:/GeGhTD8f+FNWS7C4w1Dj0Ui9Jp4v2WAdlXyW1p3uG8= -go.opentelemetry.io/collector/featuregate v1.44.0/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.138.0 h1:/kngt0FhbxanEBGdhe2yGgmvGXES1gzRubQFzivOKGU= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.138.0/go.mod h1:HWy0CDMrNvomfIz4uzs+eEOx5pt8xeacdPQzMMY83jU= -go.opentelemetry.io/collector/internal/sharedcomponent v0.138.0 h1:jVuz4ZvF8rw1NQ+up6eCyZE5w1bGQf7qV9AGGDkWTps= -go.opentelemetry.io/collector/internal/sharedcomponent v0.138.0/go.mod h1:VDLlLDAyGAhMsVhpZKZAtfxXvmJlUZO4IiQe58Ftixg= -go.opentelemetry.io/collector/internal/telemetry v0.138.0 h1:xHHYlPh1vVvr+ip0ct288l1joc4bsEeHh0rcY3WVXJo= -go.opentelemetry.io/collector/internal/telemetry v0.138.0/go.mod h1:evqf71fdIMXdQEofbs1bVnBUzfF6zysLMLR9bEAS9Xw= -go.opentelemetry.io/collector/otelcol v0.138.0 h1:vHxPRw/By8OH4NnAPfwRPCMXoTgUSzQLed3RWocmk68= -go.opentelemetry.io/collector/otelcol v0.138.0/go.mod h1:Jgq+R+9/awmgNurYIwzxKpszruAXNV3hb8g8fKXBczY= -go.opentelemetry.io/collector/pdata v1.44.0 h1:q/EfWDDKrSaf4hjTIzyPeg1ZcCRg1Uj7VTFnGfNVdk8= -go.opentelemetry.io/collector/pdata v1.44.0/go.mod 
h1:LnsjYysFc3AwMVh6KGNlkGKJUF2ReuWxtD9Hb3lSMZk= -go.opentelemetry.io/collector/pdata/pprofile v0.138.0 h1:ElnIPJK8jVzHYSnzbIVjg/v2Yq8iVLUKf7kB00zUFlE= -go.opentelemetry.io/collector/pdata/pprofile v0.138.0/go.mod h1:M7/5+Q4LohEkEB38kHhFu3S3XCA1eGSGz5uSXvNyMlM= -go.opentelemetry.io/collector/pdata/testdata v0.138.0 h1:6geeGQ4Rsb88OARLcACKn09PVIbhExaNJ1aC9OVLZaw= -go.opentelemetry.io/collector/pdata/testdata v0.138.0/go.mod h1:4wvgY+KTP7ohJVd1/pb8UIKb2TA/girsZbGTKqM5e20= -go.opentelemetry.io/collector/pdata/xpdata v0.138.0 h1:x/9RMlIY9lUXHnqBx5G2XYF7ouKREnai8yRPOh6SrUw= -go.opentelemetry.io/collector/pdata/xpdata v0.138.0/go.mod h1:Ws/JFbS2/P9KiwnVF1vL2narr+0x4d8ZK203yTznyb8= -go.opentelemetry.io/collector/pipeline v1.44.0 h1:EFdFBg3Wm2BlMtQbUeork5a4KFpS6haInSr+u/dk8rg= -go.opentelemetry.io/collector/pipeline v1.44.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI= -go.opentelemetry.io/collector/pipeline/xpipeline v0.138.0 h1:Y8blByFwDqhnEa4kOTAznx8Z89wZcAIntJx/a53BllA= -go.opentelemetry.io/collector/pipeline/xpipeline v0.138.0/go.mod h1:TOtck/PIWC89dI9+aYouX39boc7d+rGHP82SuH0xxN0= -go.opentelemetry.io/collector/processor v1.44.0 h1:jB+vfkYSR9f7HJlGJrtncld9dmnPWndCoTHZ0Wz4nvg= -go.opentelemetry.io/collector/processor v1.44.0/go.mod h1:BV0s5J7TH2YrVErfYAXvq3Z2ChJZdE84pY+sk1X55kw= +go.opentelemetry.io/collector v0.146.0 h1:2p1XQqBZYK5WTZRI+pgDHLhvxUNICeooVJJseQH0Dro= +go.opentelemetry.io/collector v0.146.0/go.mod h1:uWrObgqHLmnTEJUcexsD21zQAdHeNRwQGoZgGf5m1S8= +go.opentelemetry.io/collector/client v1.51.0 h1:7FaC2gglA7OWol/wMMSpoE1nFY6oewIIyf3nqVzO8m8= +go.opentelemetry.io/collector/client v1.51.0/go.mod h1:lx+VIlIm1/qaUeWs4ozeV/Q9y9rJQGwQo+dnk+We5TQ= +go.opentelemetry.io/collector/component v1.51.0 h1:btNW76MCRmpsk0ARRT5wspDXF9tvdaLd3uBtYXIiQn0= +go.opentelemetry.io/collector/component v1.51.0/go.mod h1:Zlgwh4yTLDhJglOXqiyXZ7paepTvvoijfFjLqOr/Qww= +go.opentelemetry.io/collector/component/componentstatus v0.146.0 h1:d4MsWsiqDEGeUCM176IS+pYcULwX5/6+gf975pW5wbs= +go.opentelemetry.io/collector/component/componentstatus v0.146.0/go.mod h1:ttB6cw2wu9vftrJFIFrAu1Kf7A3LEgeDU6pcG9pdLlY= +go.opentelemetry.io/collector/component/componenttest v0.146.0 h1:t6aUtZhryMW0/uOwjSmCAykv/5orGXAQ9Nk3oea+xqg= +go.opentelemetry.io/collector/component/componenttest v0.146.0/go.mod h1:W36xFSBn5GWFZG27eI9T0wEyhbwn/dWnJ7LkP9abK60= +go.opentelemetry.io/collector/config/configauth v1.51.0 h1:89pjoUxbmUGURr8PyaxowuIlISrBkwJUbr/JhCpL4EI= +go.opentelemetry.io/collector/config/configauth v1.51.0/go.mod h1:RXorbqKrG63mBLglhvH+A1Gn9R74JH/agPC31goV33Y= +go.opentelemetry.io/collector/config/configcompression v1.51.0 h1:kqLzehPPinndkt2M5axkzxOKSgHZwVTrcIfuTQ9itpw= +go.opentelemetry.io/collector/config/configcompression v1.51.0/go.mod h1:ZlnKaXFYL3HVMUNWVAo/YOLYoxNZo7h8SrQp3l7GV00= +go.opentelemetry.io/collector/config/configgrpc v0.146.0 h1:1SQQ3ydM+9d/zIRXiwX6OFcdHGCwL5VSN+uHJDboy3M= +go.opentelemetry.io/collector/config/configgrpc v0.146.0/go.mod h1:6tucMK/28bggtvnfTvco65w47GD5QVf978nucYpZviU= +go.opentelemetry.io/collector/config/confighttp v0.146.0 h1:MbDP/3koqYMzbTRMepdjFbc+rNOef0WsWujSgF7rJPo= +go.opentelemetry.io/collector/config/confighttp v0.146.0/go.mod h1:jQGj7AhU13JMkC8GdlF1Yr4kDhreCeydTQBLmFzbhVI= +go.opentelemetry.io/collector/config/configmiddleware v1.51.0 h1:AMZP9+LgFoAdfNTkx+qfFPqBiQY3k8yCigjv6HUbGe0= +go.opentelemetry.io/collector/config/configmiddleware v1.51.0/go.mod h1:37G0+KEiJf0ZYw4q2euslxkx1WaKun//KV8vaw1HkRA= +go.opentelemetry.io/collector/config/confignet v1.51.0 
h1:gEIPVPbboYi/ESt2WyfZBPjtrM2zPnKJX2shmNUbtok= +go.opentelemetry.io/collector/config/confignet v1.51.0/go.mod h1:4jJWdoe1MmpqxMzxrIILcS5FK2JPocXYZGUvv5ZQVKE= +go.opentelemetry.io/collector/config/configopaque v1.51.0 h1:z8Q72mBMQ6P4me+umu1kCC3sqzX+zQ7OJju5oQcdZv8= +go.opentelemetry.io/collector/config/configopaque v1.51.0/go.mod h1:w77VAty/J8dxrSyq0ObbvQxh+xh0tVg+SQqFQ7SQRzM= +go.opentelemetry.io/collector/config/configoptional v1.51.0 h1:kVD8B3JF0Hd5LrRhHIKXAcHeTbQk9cxa0nD06IgJ+Gs= +go.opentelemetry.io/collector/config/configoptional v1.51.0/go.mod h1:nBG71pzrklmiPIp1XPQiO3RzlbLIolUlFrW30q1UXzM= +go.opentelemetry.io/collector/config/configretry v1.51.0 h1:HUeaYeFPKEFHxlfj+EubxOa1HdfowlmkylTlvCkPvBU= +go.opentelemetry.io/collector/config/configretry v1.51.0/go.mod h1:ZSTYqAJCq4qf+/4DGoIxCElDIl5yHt8XxEbcnpWBbMM= +go.opentelemetry.io/collector/config/configtelemetry v0.146.0 h1:qhNTzL7jFpkhtNuPz5aor0OdbHKh86HEuODn0cw7maA= +go.opentelemetry.io/collector/config/configtelemetry v0.146.0/go.mod h1:vLUthxDJbDk0ZE9MXPvmSslNESDdGblIXWoDMov3UOE= +go.opentelemetry.io/collector/config/configtls v1.51.0 h1:fkZ3o3i6A7MCQBYCid2ZBYgaE3bYWpr3EognX09C1Tc= +go.opentelemetry.io/collector/config/configtls v1.51.0/go.mod h1:d2yeGb0Bt0WA9cL9SpC1nfhu5Qfiz+PhtQoecs+Kong= +go.opentelemetry.io/collector/confmap v1.51.0 h1:C9YlMNkIgzuauLpUz2F7DLlWwqAmkQKNcKj1XATVWuE= +go.opentelemetry.io/collector/confmap v1.51.0/go.mod h1:uWi4b9lHfvEC2poJ2I2vXwGUREVEQTcdUguOpfqdcHM= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.51.0 h1:yXEYXIowoLdmDF0CPUhLa0mgbhRuvySbcs7OShNsFaE= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.51.0/go.mod h1:ew7Ntt1ehqspNIa51e1PrgIEIW8M0Nml5bHP4C0vIeU= +go.opentelemetry.io/collector/confmap/xconfmap v0.146.0 h1:eUPAtbLKL58dYnaxCHfOf6ASoYPe2CdnnHX5GofhKsA= +go.opentelemetry.io/collector/confmap/xconfmap v0.146.0/go.mod h1:2HEaRoKD+CvIhYRHccflgfXvdTMEEf7b2KTkAvvSm+0= +go.opentelemetry.io/collector/connector v0.146.0 h1:74tI0HH0xLWPoweczbjjZGB+M+SyrYDfVS2QkEFS8S4= +go.opentelemetry.io/collector/connector v0.146.0/go.mod h1:AbWLbAHQNB2Roim8MQzz2PW3bXmoo0NoZEVkVziRctE= +go.opentelemetry.io/collector/connector/connectortest v0.146.0 h1:SzEuAQF4hbZeqG0KpawT5OBjNdWLgFDH1eVSyIuC3dw= +go.opentelemetry.io/collector/connector/connectortest v0.146.0/go.mod h1:ZUJJXufJLbQOHPdEpu4SRCipoSfKhob/MJXtaOwQ5Uc= +go.opentelemetry.io/collector/connector/xconnector v0.146.0 h1:pngzd1Yt0sZrMdtpDUCe+wnwXG89/f0rGOJMWrKFjpA= +go.opentelemetry.io/collector/connector/xconnector v0.146.0/go.mod h1:iyMNWJOlBxnqJPzLlZji9RIc+oH9OkqRaNLD6zUdmqU= +go.opentelemetry.io/collector/consumer v1.51.0 h1:Ex1x/k9VEEA2DOgt/eSc2Z9KTp0I6xBSruLmrYFfIFY= +go.opentelemetry.io/collector/consumer v1.51.0/go.mod h1:Erk6qdfVj+24QTrGCpurcrF+qdUlHkb4dgMy5wJxLvY= +go.opentelemetry.io/collector/consumer/consumererror v0.146.0 h1:T8GMl87NX/dQVNvJcT34/WtC2vZwf+KW5uyGaV5Uwwg= +go.opentelemetry.io/collector/consumer/consumererror v0.146.0/go.mod h1:dGBuApkHLvzRBoA7ZUgtcyNj1KgIqVCNHDPOrXhY+bs= +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.146.0 h1:xQuH7P4A348xDht9XrDrmWjfUK8S5RNn3T+/3AuO80g= +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.146.0/go.mod h1:SSkA7p/AWaVVG6K6DtmfyHlfOBqNswD4seB/dtp97dk= +go.opentelemetry.io/collector/consumer/consumertest v0.146.0 h1:zdPPhhjLM0YVVzcQv7lLcqWHUUsBpbo07rh5L6g9GqM= +go.opentelemetry.io/collector/consumer/consumertest v0.146.0/go.mod h1:vbixywaz907kGabycyCR8CNMNstEC31POIGNnVTRdAw= +go.opentelemetry.io/collector/consumer/xconsumer v0.146.0 
h1:Qj3lkLcUwdG8+PEghXrWQvKEP7Dqr/suPeLyp3WR4ug= +go.opentelemetry.io/collector/consumer/xconsumer v0.146.0/go.mod h1:NVEA5pLfZ627EOZu5UkLR1BSMTGeWCJNebcDJSSTyXg= +go.opentelemetry.io/collector/exporter v1.51.0 h1:oHthB4EgSxPJtEC5hqBavZNSz2emBErDkDwfTN1aZVI= +go.opentelemetry.io/collector/exporter v1.51.0/go.mod h1:iJKSvK856xicuLbGuX1ni/TNSt2YZTQDZ4J6sYHY6cI= +go.opentelemetry.io/collector/exporter/exporterhelper v0.146.0 h1:VG1vFp4h02oYCDY7YxGPI4JmNhK+I/7oXk30xuyhs7w= +go.opentelemetry.io/collector/exporter/exporterhelper v0.146.0/go.mod h1:ii0VPaHT5G7/UhYhHCK8D5Z5Ju5+ZvONsUUFw/iTlEY= +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.146.0 h1:v4zEMQ6yhmWZKr8RLfIq/IbdLs3qqCQYhYQnUPhNKGQ= +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.146.0/go.mod h1:DfhRLI+Ltaju+ggpMcG8wnioBXvAvCCtjrYaWQLurmY= +go.opentelemetry.io/collector/exporter/exportertest v0.146.0 h1:tzhimxbgmf+44HGYfaZYOiHG/l7nDPSg4foZlQQYZ9w= +go.opentelemetry.io/collector/exporter/exportertest v0.146.0/go.mod h1:UXDRG1FCsjD1oBb88DMWoAFxk+pTtO5P4a0QfNnOnVM= +go.opentelemetry.io/collector/exporter/otlpexporter v0.146.0 h1:RjQlikUpFQX+lD0Bg0n/TzTKINBJpCnkSbVw6ND+3Lc= +go.opentelemetry.io/collector/exporter/otlpexporter v0.146.0/go.mod h1:mBYN0q+6XV09tbEVMfOeQgBXWFBHOA1xCvGg+n4Dc9w= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.146.0 h1:oZPMSlV+NgjrCWAGTJn4TSKXHFKlaRVAa2UxwmhqVmk= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.146.0/go.mod h1:9IQSOchkEDu6r4+KDoWAkrxGotoBYs41IEPNnn4SqWY= +go.opentelemetry.io/collector/exporter/xexporter v0.146.0 h1:qsRviawuSk1G1qGvXG8k+NKTA2nEHNGvT5IcYYmJtWA= +go.opentelemetry.io/collector/exporter/xexporter v0.146.0/go.mod h1:iGy2IEhOu6FXCgvjOcVeacGR2sCFDnRCN5OndALsrSE= +go.opentelemetry.io/collector/extension v1.51.0 h1:NWYhvGRHHK+g1WdHqVdFuKsDtIfYoudfJ0dC6TbIfWE= +go.opentelemetry.io/collector/extension v1.51.0/go.mod h1:y5Z0djLtw0QZb8CJQv8JpeObx9bfAnw3yeu1yoKhyaA= +go.opentelemetry.io/collector/extension/extensionauth v1.51.0 h1:ox3nzKx8a/6Rf2DiuK6qUDIYbXK4frW0INZoPTFY7Xw= +go.opentelemetry.io/collector/extension/extensionauth v1.51.0/go.mod h1:alIyB3zBUOvIEn/DaAdLMFWtz9Zw4UYt1iHO0lMy5XU= +go.opentelemetry.io/collector/extension/extensionauth/extensionauthtest v0.146.0 h1:1pr8vxEhHMSHGLppid69wl3IgNaxd7VKmBU8H+r7exo= +go.opentelemetry.io/collector/extension/extensionauth/extensionauthtest v0.146.0/go.mod h1:XJMth6pec376ZwkfLzZqExwxAF+aUzZokxxnuV49tws= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.146.0 h1:ttSXMelc1iEr9Smyrffn0eYNdjDy0ljzD5neTGp9FRo= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.146.0/go.mod h1:pXHq4DE/gASJ87cFbHLR+te17IsKxKgxX3yvaV0nChg= +go.opentelemetry.io/collector/extension/extensionmiddleware v0.146.0 h1:2tMH9BnHKJHMuAuJFl+V3W/jYx8UPRcdcP7ihGU0hjA= +go.opentelemetry.io/collector/extension/extensionmiddleware v0.146.0/go.mod h1:Ka+BXI1AQazPaI/zBCU6VF1dQVBD3tg4Ob8VqBb6T9U= +go.opentelemetry.io/collector/extension/extensionmiddleware/extensionmiddlewaretest v0.146.0 h1:mTcfTFCeDbsVxxEPJB/6s+gynJ4MUf1IVxpvE4tHq70= +go.opentelemetry.io/collector/extension/extensionmiddleware/extensionmiddlewaretest v0.146.0/go.mod h1:uqAa4AvOnfABJNZ3ES1AiakVwZ4hSnX8sQUEo2TI2SA= +go.opentelemetry.io/collector/extension/extensiontest v0.146.0 h1:mlVCOW/MY26ljq5nDQO320E95h3CTZ0mvrmE40ScZI0= +go.opentelemetry.io/collector/extension/extensiontest v0.146.0/go.mod h1:H0RE40oi8NKP1EeQRhMyqgkgkUOa7X49w5apyxV2wFM= +go.opentelemetry.io/collector/extension/xextension v0.146.0 
h1:m6f608wMOR0XQyAiLHhA3kDsvkr7GvS9OlhqJ/mMjqg= +go.opentelemetry.io/collector/extension/xextension v0.146.0/go.mod h1:ZTj9SBN97Etu6WSP9jos1+0cHWxV+szCEIsIUYg1dKY= +go.opentelemetry.io/collector/extension/zpagesextension v0.146.0 h1:bDWG94mC/42hGisnwUr4tNmyr00jXOPy/+MQ3p/pY+4= +go.opentelemetry.io/collector/extension/zpagesextension v0.146.0/go.mod h1:+GI4LFMS4CVZ1YOvG3BVi1KizWKT4i21nxGsYthCw6E= +go.opentelemetry.io/collector/featuregate v1.51.0 h1:dxJuv/3T84dhNKp7fz5+8srHz1dhquGzDpLW4OZTFBw= +go.opentelemetry.io/collector/featuregate v1.51.0/go.mod h1:/1bclXgP91pISaEeNulRxzzmzMTm4I5Xih2SnI4HRSo= +go.opentelemetry.io/collector/internal/componentalias v0.146.0 h1:+83X5W49HGfhgB2tHgCeeohkrTcJGOuCv/NlmdbSNEM= +go.opentelemetry.io/collector/internal/componentalias v0.146.0/go.mod h1:3nqCHMFFwJNLmNS2+Frq9wJCM3PA7TQJam0upcwXGlw= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.146.0 h1:zCX1D5UXTr9mbHnqd/uq4yxgqLoIfeB6tktuRuvcA4U= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.146.0/go.mod h1:d82rSwQEaUBhROaV/IKXftGRPF67RXXK9SoAI/8zPHA= +go.opentelemetry.io/collector/internal/sharedcomponent v0.146.0 h1:g8oEDyGVERLpyegLFtWFA0wtbLxrtA/1+nDaQhKEsLo= +go.opentelemetry.io/collector/internal/sharedcomponent v0.146.0/go.mod h1:B4D5E7WAHTvjGkwpU79U3hIxll8pqm/j22K6dzvRl54= +go.opentelemetry.io/collector/internal/telemetry v0.146.0 h1:5uc4c+3d+dwYUXcxjPF52ysXibIyJn3XPWnXHpqL2RY= +go.opentelemetry.io/collector/internal/telemetry v0.146.0/go.mod h1:R65iQ5B6/AalN/RC+mpibJgBswObcAeH4kUOrq9r7Tg= +go.opentelemetry.io/collector/internal/testutil v0.146.0 h1:e7X+Sr6qVBfjnzR+JeeUUhNopQ1Scm/cFSQRQtoz+8Q= +go.opentelemetry.io/collector/internal/testutil v0.146.0/go.mod h1:Jkjs6rkqs973LqgZ0Fe3zrokQRKULYXPIf4HuqStiEE= +go.opentelemetry.io/collector/otelcol v0.146.0 h1:o7EveZmmNvDS3O7rMzHiN5s2EDtSOHIAxkJu9YnmRp8= +go.opentelemetry.io/collector/otelcol v0.146.0/go.mod h1:9UE9mHQqHcQ7fTqroB5lL/BeLOt5yK22GU8pfyvvAow= +go.opentelemetry.io/collector/pdata v1.51.0 h1:DnDhSEuDXNdzGRB7f6oOfXpbDApwBX3tY+3K69oUrDA= +go.opentelemetry.io/collector/pdata v1.51.0/go.mod h1:GoX1bjKDR++mgFKdT7Hynv9+mdgQ1DDXbjs7/Ww209Q= +go.opentelemetry.io/collector/pdata/pprofile v0.146.0 h1:sr4Z8R/OzPyqH/Zp82UKBqkdTPYanx8LVhy0QWEhJqA= +go.opentelemetry.io/collector/pdata/pprofile v0.146.0/go.mod h1:SIES4PQvrB/tMdg5vLDHs59Bi4YdynRyvHCWqeC1dvY= +go.opentelemetry.io/collector/pdata/testdata v0.146.0 h1:cD9eDMD+TDpWodvuqtjzolBoyGBFe9XunU5C2ZjlJ4s= +go.opentelemetry.io/collector/pdata/testdata v0.146.0/go.mod h1:5L1pCZ3ydsFi8Ydvsfd8rJW9S/f8n4bxoaGTvespyeE= +go.opentelemetry.io/collector/pdata/xpdata v0.146.0 h1:l4jJCE7VoiCbAvqmaqklMbAEbwdbbGtW5RGiHIA6GTo= +go.opentelemetry.io/collector/pdata/xpdata v0.146.0/go.mod h1:V5Q5e3TbMtU4Ri2RsHHavy24EaVO0mmofrpxFosCMPE= +go.opentelemetry.io/collector/pipeline v1.51.0 h1:GZBNW+aaOE+zufGzAkXy0OI7n1cqepEa5J+beaOpS2k= +go.opentelemetry.io/collector/pipeline v1.51.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI= +go.opentelemetry.io/collector/pipeline/xpipeline v0.146.0 h1:tCK73+SEZoh1mthOyJE931LflAt5jJYu2rF+VnEN/1w= +go.opentelemetry.io/collector/pipeline/xpipeline v0.146.0/go.mod h1:5mn1mfBDE+J3ohtkd7109qvtaxswRFS1L1HxOc+k06U= +go.opentelemetry.io/collector/processor v1.51.0 h1:PKpCzkLQmqaW08TOVh/zM0qx07Ihq+DR5J/OBkPiL9o= +go.opentelemetry.io/collector/processor v1.51.0/go.mod h1:rtIPFS+EFRAkG+CSwtjxs2IsIkuZStObvALeueD02XI= go.opentelemetry.io/collector/processor/processorhelper v0.138.0 h1:Affdz4mJdjE6iJMWO6IpLcorBr1E+HFbo3/ok194Qc4= 
go.opentelemetry.io/collector/processor/processorhelper v0.138.0/go.mod h1:QS6FzV/0/4kN3VPIYA+FPMuKkJnXnxvGKdllz2Fuopw= go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.138.0 h1:JojL1OHoKQpqZ5dyi4sJ44+sk9hbmwkV8WIElI3XJ+I= go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.138.0/go.mod h1:oMKZdW8U17c9TpFpBAqOMguwqnX9L4Invgh2SG5CKA0= -go.opentelemetry.io/collector/processor/processortest v0.138.0 h1:WSHPESV1NqPHlt9ShzTlc9y7ZLf83223fyfC4wzJoZg= -go.opentelemetry.io/collector/processor/processortest v0.138.0/go.mod h1:h+rFcy+svVipVVpAkellP5egcPYsHeOfL3o7lkFNsGs= -go.opentelemetry.io/collector/processor/xprocessor v0.138.0 h1:V+zKVy2kstPhIDsGvEBIRUxq8HzAdG1zdJP/hAuwENQ= -go.opentelemetry.io/collector/processor/xprocessor v0.138.0/go.mod h1:0Ybup3sw+eJkB0Jn1HID/LPNvTo33ur61ArHYq7Nozo= -go.opentelemetry.io/collector/receiver v1.44.0 h1:oPgHg7u+aqplnVTLyC3FapTsAE7BiGdTtDceE1BuTJg= -go.opentelemetry.io/collector/receiver v1.44.0/go.mod h1:NzkrGOIoWigOG54eF92ZGfJ8oSWhqGHTT0ZCGaH5NMc= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.138.0 h1:lYgKvKIm1/6XAVO55C7wBCocalhimBpjlXx1kHyC2No= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.138.0/go.mod h1:Pyquve9PvbQcbzoIPvEd25LDwrYgFAjiIgGIXma2k/M= -go.opentelemetry.io/collector/receiver/receiverhelper v0.138.0 h1:aEgyMilBJ2FoWQ+U4m28lzjmTP2UteDAIO96jRsPHmM= -go.opentelemetry.io/collector/receiver/receiverhelper v0.138.0/go.mod h1:WxMvaPgL9MWrIKjDiZ/SmopEXAX+sO9CD/SfXI9J63A= -go.opentelemetry.io/collector/receiver/receivertest v0.138.0 h1:K6kZ/epuAjjCCr1UMzNFyx1rynFSc+ifMXt5C/hWcXI= -go.opentelemetry.io/collector/receiver/receivertest v0.138.0/go.mod h1:p3cGSplwwp71r7R6u0e8N0rP/mmPsFjJ4WFV2Bhv7os= -go.opentelemetry.io/collector/receiver/xreceiver v0.138.0 h1:wspJazZc4htPBT08JpUI6gq+qeUUxSOhxXwWGn+QnlM= -go.opentelemetry.io/collector/receiver/xreceiver v0.138.0/go.mod h1:+S/AsbEs1geUt3B+HAhdSjd+3hPkjtmcSBltKwpCBik= +go.opentelemetry.io/collector/processor/processortest v0.146.0 h1:Ee1z57TR5RI0BwAimZ+M16Xv4kSJ4e6AqT61M7e/+ww= +go.opentelemetry.io/collector/processor/processortest v0.146.0/go.mod h1:IhdLspxFrudXxs7ouPltiY9wQyWyR7YywY+mS7p9+Z8= +go.opentelemetry.io/collector/processor/xprocessor v0.146.0 h1:PvoGMUg6DBri9tx0QpHjVYKVCHS4f4r7iFTVWlNQVxI= +go.opentelemetry.io/collector/processor/xprocessor v0.146.0/go.mod h1:zUMVosdk9AKqrRI3w7Afd/ULrQFGNw+a7qPo6cIkmUo= +go.opentelemetry.io/collector/receiver v1.51.0 h1:BUEHfN3HSvR3YzPzJOLOotPyJlILi2D4WkGzNPNuDlA= +go.opentelemetry.io/collector/receiver v1.51.0/go.mod h1:NrkCdesDdxt6bjSVU2J+UsQxDvOUMIe/XdhnexaqAic= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.146.0 h1:if5NEoADIir5rjzNSAb9akgSmf+mMAfm00YeXloQ0R4= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.146.0/go.mod h1:ewFD0cI2rjzi4+BGdlcPB1KhpH1MT97XLu+ici7PsYE= +go.opentelemetry.io/collector/receiver/receiverhelper v0.146.0 h1:k7ALEFs7qqFKx/EcJxr8POo738Ce//5yecMyVO0/LUk= +go.opentelemetry.io/collector/receiver/receiverhelper v0.146.0/go.mod h1:bgix03XMPe2aYlVD2djGyVnbXWvfmbd/Tb0LBMZYgEY= +go.opentelemetry.io/collector/receiver/receivertest v0.146.0 h1:kFMUQtdXG1ms3NCRJnxzF/5s+DlryCvIajlRdHawlzA= +go.opentelemetry.io/collector/receiver/receivertest v0.146.0/go.mod h1:idFY+2kjD2zx0ggVyHs5iK8EayalMEgBrR3jVOyixXQ= +go.opentelemetry.io/collector/receiver/xreceiver v0.146.0 h1:Q20OxzQpq3MDdorNCvFvysnR30xKZ4m5Eqi05V/Oq8o= +go.opentelemetry.io/collector/receiver/xreceiver v0.146.0/go.mod h1:wh+QsLDUvOak0eqv8Qf25eTrLfzYpnq/6ZcTHGbGJ7w= go.opentelemetry.io/collector/semconv 
v0.128.1-0.20250610090210-188191247685 h1:XCN7qkZRNzRYfn6chsMZkbFZxoFcW6fZIsZs2aCzcbc= go.opentelemetry.io/collector/semconv v0.128.1-0.20250610090210-188191247685/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= -go.opentelemetry.io/collector/service v0.138.0 h1:ubOa9S3Wdv6hHkoXCuPfidtgUVGIUYY8+SpoM7shAB8= -go.opentelemetry.io/collector/service v0.138.0/go.mod h1:EEsuXliw8X+7R68TsJ5Z5uCBmHNTOK5iutBCY/Z6+vg= -go.opentelemetry.io/collector/service/hostcapabilities v0.138.0 h1:0Zs/cP3qy/6nIer9DxEJ6r40F6JdcamivhdEzHCToT4= -go.opentelemetry.io/collector/service/hostcapabilities v0.138.0/go.mod h1:Jf/g1Et9Uqk8ZSw4kIUh29Ki+vUT7xX1w9a6+SDX1bs= -go.opentelemetry.io/collector/service/telemetry/telemetrytest v0.138.0 h1:QewMMSZWLzk6Mx8OBiE5bJGdrCij7mXrMZeym1b38cw= -go.opentelemetry.io/collector/service/telemetry/telemetrytest v0.138.0/go.mod h1:5H1FcdgmBBnMKH8x8a1SnadsHeCXHewu2z3/Tqm+Diw= -go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 h1:aBKdhLVieqvwWe9A79UHI/0vgp2t/s2euY8X59pGRlw= -go.opentelemetry.io/contrib/bridges/otelzap v0.13.0/go.mod h1:SYqtxLQE7iINgh6WFuVi2AI70148B8EI35DSk0Wr8m4= +go.opentelemetry.io/collector/service v0.146.0 h1:vPvvwjkp8qkwmtsjAohsOZL3N46rglyJeHiaWcBTF+g= +go.opentelemetry.io/collector/service v0.146.0/go.mod h1:mcqFKprTK6cevd4Nz06dwT1+rtRVVC2r3jOMHhE8V4Y= +go.opentelemetry.io/collector/service/hostcapabilities v0.146.0 h1:qgio4i4mSkm2PUdKU1gUm+lPQy0Tq7gXTuMd53wpKlU= +go.opentelemetry.io/collector/service/hostcapabilities v0.146.0/go.mod h1:wjJ/FLlsQcx4nuOcwZK92IMotvoOY/STfTjlTn6zFm0= +go.opentelemetry.io/collector/service/telemetry/telemetrytest v0.146.0 h1:oMsLpd84wY0eRybA6TO1TAQBDhrZpGikztQc3BMqFXk= +go.opentelemetry.io/collector/service/telemetry/telemetrytest v0.146.0/go.mod h1:ms/NfxVvYZDD9dSqOBq8aY220dmRtYXLyLXVCcwuVy0= go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4= go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E= go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= @@ -1147,8 +1147,6 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= go.opentelemetry.io/contrib/otelconf v0.18.0 h1:ciF2Gf00BWs0DnexKFZXcxg9kJ8r3SUW1LOzW3CsKA8= go.opentelemetry.io/contrib/otelconf v0.18.0/go.mod h1:FcP7k+JLwBLdOxS6qY6VQ/4b5VBntI6L6o80IMwhAeI= -go.opentelemetry.io/contrib/propagators/b3 v1.38.0 h1:uHsCCOSKl0kLrV2dLkFK+8Ywk9iKa/fptkytc6aFFEo= -go.opentelemetry.io/contrib/propagators/b3 v1.38.0/go.mod h1:wMRSZJZcY8ya9mApLLhwIMjqmApy2o/Ml+62lhvxyHU= go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 h1:UIrZgRBHUrYRlJ4V419lVb4rs2ar0wFzKNAebaP05XU= go.opentelemetry.io/contrib/propagators/jaeger v1.35.0/go.mod h1:0ciyFyYZxE6JqRAQvIgGRabKWDUmNdW3GAQb6y/RlFU= go.opentelemetry.io/contrib/samplers/jaegerremote v0.31.0 h1:l8XCsDh7L6Z7PB+vlw1s4ufNab+ayT2RMNdvDE/UyPc= @@ -1182,10 +1180,8 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 h1:5gn2urDL/FBnK8 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0/go.mod h1:0fBG6ZJxhqByfFZDwSwpZGzJU671HkwpWaNe2t4VUPI= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= -go.opentelemetry.io/otel/log v0.14.0 
h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= -go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= -go.opentelemetry.io/otel/log/logtest v0.14.0 h1:BGTqNeluJDK2uIHAY8lRqxjVAYfqgcaTbVk1n3MWe5A= -go.opentelemetry.io/otel/log/logtest v0.14.0/go.mod h1:IuguGt8XVP4XA4d2oEEDMVDBBCesMg8/tSGWDjuKfoA= +go.opentelemetry.io/otel/log v0.16.0 h1:DeuBPqCi6pQwtCK0pO4fvMB5eBq6sNxEnuTs88pjsN4= +go.opentelemetry.io/otel/log v0.16.0/go.mod h1:rWsmqNVTLIA8UnwYVOItjyEZDbKIkMxdQunsIhpUMes= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= @@ -1203,12 +1199,12 @@ go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZY go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= -go.opentelemetry.io/proto/slim/otlp v1.8.0 h1:afcLwp2XOeCbGrjufT1qWyruFt+6C9g5SOuymrSPUXQ= -go.opentelemetry.io/proto/slim/otlp v1.8.0/go.mod h1:Yaa5fjYm1SMCq0hG0x/87wV1MP9H5xDuG/1+AhvBcsI= -go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0 h1:Uc+elixz922LHx5colXGi1ORbsW8DTIGM+gg+D9V7HE= -go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0/go.mod h1:VyU6dTWBWv6h9w/+DYgSZAPMabWbPTFTuxp25sM8+s0= -go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0 h1:i8YpvWGm/Uq1koL//bnbJ/26eV3OrKWm09+rDYo7keU= -go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0/go.mod h1:pQ70xHY/ZVxNUBPn+qUWPl8nwai87eWdqL3M37lNi9A= +go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE= +go.opentelemetry.io/proto/slim/otlp v1.9.0/go.mod h1:xXdeJJ90Gqyll+orzUkY4bOd2HECo5JofeoLpymVqdI= +go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o13nadWDNkH/quoDomDUClnQBpdQQ2Qqv0lQBjIXjE8= +go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0/go.mod h1:Gyb6Xe7FTi/6xBHwMmngGoHqL0w29Y4eW8TGFzpefGA= +go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0 h1:EiUYvtwu6PMrMHVjcPfnsG3v+ajPkbUeH+IL93+QYyk= +go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0/go.mod h1:mUUHKFiN2SST3AhJ8XhJxEoeVW12oqfXog0Bo8W3Ec4= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -1455,8 +1451,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= gonum.org/v1/netlib 
v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.267.0 h1:w+vfWPMPYeRs8qH1aYYsFX68jMls5acWl/jocfLomwE= diff --git a/vendor/github.com/ebitengine/purego/func.go b/vendor/github.com/ebitengine/purego/func.go index 4662e064020..ee611b91f7f 100644 --- a/vendor/github.com/ebitengine/purego/func.go +++ b/vendor/github.com/ebitengine/purego/func.go @@ -286,7 +286,7 @@ func RegisterFunc(fptr any, cfn uintptr) { for j, val := range args[i:] { if val.Kind() == reflect.String { - ptr := strings.CString(v.String()) + ptr := strings.CString(val.String()) keepAlive = append(keepAlive, ptr) val = reflect.ValueOf(ptr) args[i+j] = val diff --git a/vendor/github.com/ebitengine/purego/struct_amd64.go b/vendor/github.com/ebitengine/purego/struct_amd64.go index bd6c977bf4f..c4c2ad8f582 100644 --- a/vendor/github.com/ebitengine/purego/struct_amd64.go +++ b/vendor/github.com/ebitengine/purego/struct_amd64.go @@ -165,7 +165,7 @@ func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintp place(f) case reflect.Bool: if f.Bool() { - val |= 1 + val |= 1 << shift } shift += 8 class |= _INTEGER diff --git a/vendor/github.com/ebitengine/purego/struct_arm64.go b/vendor/github.com/ebitengine/purego/struct_arm64.go index 6c73e98fe03..8605e77bdb1 100644 --- a/vendor/github.com/ebitengine/purego/struct_arm64.go +++ b/vendor/github.com/ebitengine/purego/struct_arm64.go @@ -117,13 +117,15 @@ func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr } else { addInt(uintptr(val)) } + val = 0 + class = _NO_CLASS } switch f.Type().Kind() { case reflect.Struct: place(f) case reflect.Bool: if f.Bool() { - val |= 1 + val |= 1 << shift } shift += 8 class |= _INT diff --git a/vendor/github.com/ebitengine/purego/struct_loong64.go b/vendor/github.com/ebitengine/purego/struct_loong64.go index 69f954f046c..da7f1a1508b 100644 --- a/vendor/github.com/ebitengine/purego/struct_loong64.go +++ b/vendor/github.com/ebitengine/purego/struct_loong64.go @@ -104,7 +104,7 @@ func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr place(f) case reflect.Bool: if f.Bool() { - val |= 1 + val |= 1 << shift } shift += 8 class |= _INT diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig index faef0c91e7e..c37602a02da 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -19,3 +19,6 @@ indent_size = 2 [.golangci.yaml] indent_size = 2 + +[devenv.yaml] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc index 2e0f9f5f711..e2be8891cb6 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/.envrc +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -1,4 +1,7 @@ -if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" -fi -use flake . 
--impure +#!/usr/bin/env bash + +export DIRENV_WARN_TIMEOUT=20s + +eval "$(devenv direnvrc)" + +use devenv diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore index 470e7ca2bd2..71caea19ff0 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/.gitignore +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -1,6 +1,10 @@ -/.devenv/ -/.direnv/ -/.pre-commit-config.yaml /bin/ /build/ /var/ + +# Devenv +.devenv* +devenv.local.nix +devenv.local.yaml +.direnv +.pre-commit-config.yaml diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.lock b/vendor/github.com/go-viper/mapstructure/v2/devenv.lock new file mode 100644 index 00000000000..72c2c9b4db8 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.lock @@ -0,0 +1,103 @@ +{ + "nodes": { + "devenv": { + "locked": { + "dir": "src/modules", + "lastModified": 1765288076, + "owner": "cachix", + "repo": "devenv", + "rev": "93c055af1e8fcac49251f1b2e1c57f78620ad351", + "type": "github" + }, + "original": { + "dir": "src/modules", + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1765121682, + "owner": "edolstra", + "repo": "flake-compat", + "rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "git-hooks": { + "inputs": { + "flake-compat": "flake-compat", + "gitignore": "gitignore", + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1765016596, + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "548fc44fca28a5e81c5d6b846e555e6b9c2a5a3c", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "git-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1762808025, + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "cb5e3fdca1de58ccbc3ef53de65bd372b48f567c", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1764580874, + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "dcf61356c3ab25f1362b4a4428a6d871e84f1d1d", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "git-hooks": "git-hooks", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": [ + "git-hooks" + ] + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.nix b/vendor/github.com/go-viper/mapstructure/v2/devenv.nix new file mode 100644 index 00000000000..b31ab7a1ffb --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.nix @@ -0,0 +1,14 @@ +{ + pkgs, + ... 
+}: + +{ + languages = { + go.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml b/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml new file mode 100644 index 00000000000..68616a49cdd --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml @@ -0,0 +1,4 @@ +# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json +inputs: + nixpkgs: + url: github:cachix/devenv-nixpkgs/rolling diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock deleted file mode 100644 index 5e67bdd6b42..00000000000 --- a/vendor/github.com/go-viper/mapstructure/v2/flake.lock +++ /dev/null @@ -1,294 +0,0 @@ -{ - "nodes": { - "cachix": { - "inputs": { - "devenv": [ - "devenv" - ], - "flake-compat": [ - "devenv" - ], - "git-hooks": [ - "devenv" - ], - "nixpkgs": "nixpkgs" - }, - "locked": { - "lastModified": 1742042642, - "narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=", - "owner": "cachix", - "repo": "cachix", - "rev": "a624d3eaf4b1d225f918de8543ed739f2f574203", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "latest", - "repo": "cachix", - "type": "github" - } - }, - "devenv": { - "inputs": { - "cachix": "cachix", - "flake-compat": "flake-compat", - "git-hooks": "git-hooks", - "nix": "nix", - "nixpkgs": "nixpkgs_3" - }, - "locked": { - "lastModified": 1744876578, - "narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=", - "owner": "cachix", - "repo": "devenv", - "rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": [ - "devenv", - "nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1712014858, - "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-parts_2": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1743550720, - "narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "c621e8422220273271f52058f618c94e405bb0f5", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "git-hooks": { - "inputs": { - "flake-compat": [ - "devenv" - ], - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1742649964, - "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "git-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 
1709087332, - "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "libgit2": { - "flake": false, - "locked": { - "lastModified": 1697646580, - "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=", - "owner": "libgit2", - "repo": "libgit2", - "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5", - "type": "github" - }, - "original": { - "owner": "libgit2", - "repo": "libgit2", - "type": "github" - } - }, - "nix": { - "inputs": { - "flake-compat": [ - "devenv" - ], - "flake-parts": "flake-parts", - "libgit2": "libgit2", - "nixpkgs": "nixpkgs_2", - "nixpkgs-23-11": [ - "devenv" - ], - "nixpkgs-regression": [ - "devenv" - ], - "pre-commit-hooks": [ - "devenv" - ] - }, - "locked": { - "lastModified": 1741798497, - "narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=", - "owner": "domenkozar", - "repo": "nix", - "rev": "f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.24", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1733212471, - "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1743296961, - "narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=", - "owner": "nix-community", - "repo": "nixpkgs.lib", - "rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nixpkgs.lib", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1717432640, - "narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "88269ab3044128b7c2f4c7d68448b2fb50456870", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "release-24.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_3": { - "locked": { - "lastModified": 1733477122, - "narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=", - "owner": "cachix", - "repo": "devenv-nixpkgs", - "rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "rolling", - "repo": "devenv-nixpkgs", - "type": "github" - } - }, - "nixpkgs_4": { - "locked": { - "lastModified": 1744536153, - "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts_2", - "nixpkgs": "nixpkgs_4" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix deleted file mode 100644 index 3b116f426d4..00000000000 --- a/vendor/github.com/go-viper/mapstructure/v2/flake.nix +++ /dev/null @@ -1,46 +0,0 @@ -{ - inputs = { - nixpkgs.url = 
"github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = - inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ - "x86_64-linux" - "x86_64-darwin" - "aarch64-darwin" - ]; - - perSystem = - { pkgs, ... }: - rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - }; - - pre-commit.hooks = { - nixpkgs-fmt.enable = true; - }; - - packages = with pkgs; [ - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - }; - }; - }; -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go index 7c35bce0202..9087fd96c79 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -173,6 +173,25 @@ // Public: "I made it through!" // } // +// # Custom Decoding with Unmarshaler +// +// Types can implement the Unmarshaler interface to control their own decoding. The interface +// behaves similarly to how UnmarshalJSON does in the standard library. It can be used as an +// alternative or companion to a DecodeHook. +// +// type TrimmedString string +// +// func (t *TrimmedString) UnmarshalMapstructure(input any) error { +// str, ok := input.(string) +// if !ok { +// return fmt.Errorf("expected string, got %T", input) +// } +// *t = TrimmedString(strings.TrimSpace(str)) +// return nil +// } +// +// See the Unmarshaler interface documentation for more details. +// // # Other Configuration // // mapstructure is highly configurable. See the DecoderConfig struct @@ -218,6 +237,17 @@ type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, any) (any, error) // values. type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (any, error) +// Unmarshaler is the interface implemented by types that can unmarshal +// themselves. UnmarshalMapstructure receives the input data (potentially +// transformed by DecodeHook) and should populate the receiver with the +// decoded values. +// +// The Unmarshaler interface takes precedence over the default decoding +// logic for any type (structs, slices, maps, primitives, etc.). +type Unmarshaler interface { + UnmarshalMapstructure(any) error +} + // DecoderConfig is the configuration that is used to create a new decoder // and allows customization of various aspects of decoding. type DecoderConfig struct { @@ -281,6 +311,13 @@ type DecoderConfig struct { // } Squash bool + // Deep will map structures in slices instead of copying them + // + // type Parent struct { + // Children []Child `mapstructure:",deep"` + // } + Deep bool + // Metadata is the struct that will contain extra metadata about // the decoding. If this is nil, then no metadata will be tracked. Metadata *Metadata @@ -290,9 +327,15 @@ type DecoderConfig struct { Result any // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" + // defaults to "mapstructure". Multiple tag names can be specified + // as a comma-separated list (e.g., "yaml,json"), and the first + // matching non-empty tag will be used. TagName string + // RootName specifies the name to use for the root element in error messages. 
For example: + // '' has unset fields: + RootName string + // The option of the value in the tag that indicates a field should // be squashed. This defaults to "squash". SquashTagOption string @@ -304,11 +347,34 @@ type DecoderConfig struct { // MatchName is the function used to match the map key to the struct // field name or tag. Defaults to `strings.EqualFold`. This can be used // to implement case-sensitive tag values, support snake casing, etc. + // + // MatchName is used as a fallback comparison when the direct key lookup fails. + // See also MapFieldName for transforming field names before lookup. MatchName func(mapKey, fieldName string) bool // DecodeNil, if set to true, will cause the DecodeHook (if present) to run // even if the input is nil. This can be used to provide default values. DecodeNil bool + + // MapFieldName is the function used to convert the struct field name to the map's key name. + // + // This is useful for automatically converting between naming conventions without + // explicitly tagging each field. For example, to convert Go's PascalCase field names + // to snake_case map keys: + // + // MapFieldName: func(s string) string { + // return strcase.ToSnake(s) + // } + // + // When decoding from a map to a struct, the transformed field name is used for + // the initial lookup. If not found, MatchName is used as a fallback comparison. + // Explicit struct tags always take precedence over MapFieldName. + MapFieldName func(string) string + + // DisableUnmarshaler, if set to true, disables the use of the Unmarshaler + // interface. Types implementing Unmarshaler will be decoded using the + // standard struct decoding logic instead. + DisableUnmarshaler bool } // A Decoder takes a raw interface value and turns it into structured @@ -445,6 +511,12 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.MatchName = strings.EqualFold } + if config.MapFieldName == nil { + config.MapFieldName = func(s string) string { + return s + } + } + result := &Decoder{ config: config, } @@ -458,7 +530,7 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { // Decode decodes the given raw interface to the target pointer specified // by the configuration. 
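Editor's note: taken together, the new Unmarshaler interface and the TagName/MapFieldName/RootName options above change how a decoder is wired up. A minimal sketch of combining them — the Config type, field names, and input map are illustrative, not part of this change:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-viper/mapstructure/v2"
)

// TrimmedString opts into the new Unmarshaler interface: it receives the raw
// input value and populates itself.
type TrimmedString string

func (t *TrimmedString) UnmarshalMapstructure(input any) error {
	s, ok := input.(string)
	if !ok {
		return fmt.Errorf("expected string, got %T", input)
	}
	*t = TrimmedString(strings.TrimSpace(s))
	return nil
}

// Config is a hypothetical target struct with no explicit tags.
type Config struct {
	ListenAddr TrimmedString
	MaxRetries int
}

func main() {
	var out Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:       &out,
		TagName:      "yaml,json",     // comma-separated list: first non-empty tag wins
		MapFieldName: strings.ToLower, // untagged fields are looked up as "listenaddr", "maxretries"
		RootName:     "config",        // shown instead of "" in error messages
	})
	if err != nil {
		panic(err)
	}
	err = dec.Decode(map[string]any{
		"listenaddr": "  127.0.0.1:8080  ",
		"maxretries": 3,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q %d\n", out.ListenAddr, out.MaxRetries) // "127.0.0.1:8080" 3
}
```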
func (d *Decoder) Decode(input any) error { - err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + err := d.decode(d.config.RootName, input, reflect.ValueOf(d.config.Result).Elem()) // Retain some of the original behavior when multiple errors ocurr var joinedErr interface{ Unwrap() []error } @@ -540,36 +612,50 @@ func (d *Decoder) decode(name string, input any, outVal reflect.Value) error { var err error addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Complex64: - err = d.decodeComplex(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind)) + + // Check if the target implements Unmarshaler and use it if not disabled + unmarshaled := false + if !d.config.DisableUnmarshaler { + if unmarshaler, ok := getUnmarshaler(outVal); ok { + if err = unmarshaler.UnmarshalMapstructure(input); err != nil { + err = newDecodeError(name, err) + } + unmarshaled = true + } + } + + if !unmarshaled { + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind)) + } } // If we reached here, then we successfully decoded SOMETHING, so @@ -668,7 +754,7 @@ func (d *Decoder) decodeString(name string, data any, val reflect.Value) error { case reflect.Uint8: var uints []uint8 if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) + uints = make([]uint8, dataVal.Len()) for i := range uints { uints[i] = dataVal.Index(i).Interface().(uint8) } @@ -1060,8 +1146,8 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re ) } - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name + tagValue, _ := getTagValue(f, d.config.TagName) + keyName := d.config.MapFieldName(f.Name) if tagValue == "" && 
d.config.IgnoreUntaggedFields { continue @@ -1070,6 +1156,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re // If Squash is set in the config, we squash the field down. squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + // If Deep is set in the config, set as default value. + deep := d.config.Deep + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) // Determine the name of the key in the map @@ -1078,12 +1167,12 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re continue } // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + if strings.Contains(tagValue[index+1:], "omitempty") && isEmptyValue(v) { continue } // If "omitzero" is specified in the tag, it ignores zero values. - if strings.Index(tagValue[index+1:], "omitzero") != -1 && v.IsZero() { + if strings.Contains(tagValue[index+1:], "omitzero") && v.IsZero() { continue } @@ -1103,7 +1192,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re ) } } else { - if strings.Index(tagValue[index+1:], "remain") != -1 { + if strings.Contains(tagValue[index+1:], "remain") { if v.Kind() != reflect.Map { return newDecodeError( name+"."+f.Name, @@ -1118,6 +1207,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re continue } } + + deep = deep || strings.Contains(tagValue[index+1:], "deep") + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { keyName = keyNameTagValue } @@ -1164,6 +1256,41 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) } + case reflect.Slice: + if deep { + var childType reflect.Type + switch v.Type().Elem().Kind() { + case reflect.Struct: + childType = reflect.TypeOf(map[string]any{}) + default: + childType = v.Type().Elem() + } + + sType := reflect.SliceOf(childType) + + addrVal := reflect.New(sType) + + vSlice := reflect.MakeSlice(sType, v.Len(), v.Cap()) + + if v.Len() > 0 { + reflect.Indirect(addrVal).Set(vSlice) + + err := d.decode(keyName, v.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + } + + vSlice = reflect.Indirect(addrVal) + + valMap.SetMapIndex(reflect.ValueOf(keyName), vSlice) + + break + } + + // When deep mapping is not needed, fallthrough to normal copy + fallthrough + default: valMap.SetMapIndex(reflect.ValueOf(keyName), v) } @@ -1471,7 +1598,10 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e remain := false // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + tagParts := getTagParts(fieldType, d.config.TagName) + if len(tagParts) == 0 { + tagParts = []string{""} + } for _, tag := range tagParts[1:] { if tag == d.config.SquashTagOption { squash = true @@ -1492,6 +1622,18 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e if !fieldVal.IsNil() { structs = append(structs, fieldVal.Elem().Elem()) } + case reflect.Ptr: + if fieldVal.Type().Elem().Kind() == reflect.Struct { + if fieldVal.IsNil() { + fieldVal.Set(reflect.New(fieldVal.Type().Elem())) + } + structs = append(structs, fieldVal.Elem()) + } else { + errs = append(errs, newDecodeError( + name+"."+fieldType.Name, + fmt.Errorf("unsupported type for squashed pointer: %s", fieldVal.Type().Elem().Kind()), + )) + } default: errs = 
append(errs, newDecodeError( name+"."+fieldType.Name, @@ -1516,13 +1658,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e field, fieldValue := f.field, f.val fieldName := field.Name - tagValue := field.Tag.Get(d.config.TagName) + tagValue, _ := getTagValue(field, d.config.TagName) if tagValue == "" && d.config.IgnoreUntaggedFields { continue } tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagValue != "" { fieldName = tagValue + } else { + fieldName = d.config.MapFieldName(fieldName) } rawMapKey := reflect.ValueOf(fieldName) @@ -1605,8 +1749,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } sort.Strings(keys) + // Improve error message when name is empty by showing the target struct type + // in the case where it is empty for embedded structs. + errorName := name + if errorName == "" { + errorName = val.Type().String() + } errs = append(errs, newDecodeError( - name, + errorName, fmt.Errorf("has invalid keys: %s", strings.Join(keys, ", ")), )) } @@ -1692,7 +1842,7 @@ func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields return true } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + if checkMapstructureTags && hasAnyTag(f, tagName) { // check for mapstructure tags inside return true } } @@ -1700,13 +1850,99 @@ func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, } func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + if v.Kind() != reflect.Ptr { + return v + } + + switch v.Elem().Kind() { + case reflect.Slice: + return v.Elem() + + case reflect.Struct: + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v + + default: return v } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref +} + +func hasAnyTag(field reflect.StructField, tagName string) bool { + _, ok := getTagValue(field, tagName) + return ok +} + +func getTagParts(field reflect.StructField, tagName string) []string { + tagValue, ok := getTagValue(field, tagName) + if !ok { + return nil } - return v + return strings.Split(tagValue, ",") +} + +func getTagValue(field reflect.StructField, tagName string) (string, bool) { + for _, name := range splitTagNames(tagName) { + if tag := field.Tag.Get(name); tag != "" { + return tag, true + } + } + return "", false +} + +func splitTagNames(tagName string) []string { + if tagName == "" { + return []string{"mapstructure"} + } + parts := strings.Split(tagName, ",") + result := make([]string, 0, len(parts)) + + for _, name := range parts { + name = strings.TrimSpace(name) + if name != "" { + result = append(result, name) + } + } + + return result +} + +// unmarshalerType is cached for performance +var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + +// getUnmarshaler checks if the value implements Unmarshaler and returns +// the Unmarshaler and a boolean indicating if it was found. It handles both +// pointer and value receivers. 
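Editor's note: the slice branch added to decodeMapFromStruct above is what gives the new `deep` tag its behavior when encoding a struct into a map — slice elements that are structs are themselves mapped instead of copied. A minimal sketch, reusing the hypothetical Parent/Child types from the DecoderConfig doc comment:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Child struct {
	Name string
}

type Parent struct {
	// Without ",deep" the []Child value is copied into the map as-is;
	// with it, each element is converted to a map[string]any.
	Children []Child `mapstructure:",deep"`
}

func main() {
	var out map[string]any
	if err := mapstructure.Decode(Parent{Children: []Child{{Name: "a"}}}, &out); err != nil {
		panic(err)
	}
	// Expected shape, assuming the deep slice path above: []map[string]any{{"Name": "a"}}
	fmt.Printf("%#v\n", out["Children"])
}
```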
+func getUnmarshaler(val reflect.Value) (Unmarshaler, bool) { + // Skip invalid or nil values + if !val.IsValid() { + return nil, false + } + + switch val.Kind() { + case reflect.Pointer, reflect.Interface: + if val.IsNil() { + return nil, false + } + } + + // Check pointer receiver first (most common case) + if val.CanAddr() { + ptrVal := val.Addr() + // Quick check: if no methods, can't implement any interface + if ptrVal.Type().NumMethod() > 0 && ptrVal.Type().Implements(unmarshalerType) { + return ptrVal.Interface().(Unmarshaler), true + } + } + + // Check value receiver + // Quick check: if no methods, can't implement any interface + if val.Type().NumMethod() > 0 && val.CanInterface() && val.Type().Implements(unmarshalerType) { + return val.Interface().(Unmarshaler), true + } + + return nil, false } diff --git a/vendor/github.com/google/go-tpm/tpm2/errors.go b/vendor/github.com/google/go-tpm/tpm2/errors.go index 0de8e40964a..6336459e6f9 100644 --- a/vendor/github.com/google/go-tpm/tpm2/errors.go +++ b/vendor/github.com/google/go-tpm/tpm2/errors.go @@ -166,7 +166,7 @@ var fmt1Descs = map[TPMRC]errorDesc{ description: "value is out of range or is not correct for the context", }, TPMRCHierarchy: { - name: "TPM_RC_HIERATPMRCHY", + name: "TPM_RC_HIERARCHY", description: "hierarchy is not enabled or is not correct for the use", }, TPMRCKeySize: { diff --git a/vendor/github.com/google/go-tpm/tpm2/marshalling.go b/vendor/github.com/google/go-tpm/tpm2/marshalling.go index 04d16074611..3ebd180087e 100644 --- a/vendor/github.com/google/go-tpm/tpm2/marshalling.go +++ b/vendor/github.com/google/go-tpm/tpm2/marshalling.go @@ -2,6 +2,7 @@ package tpm2 import ( "bytes" + "encoding/binary" "fmt" "reflect" ) @@ -126,3 +127,251 @@ func (b *boxed[T]) unmarshal(buf *bytes.Buffer) error { b.Contents = new(T) return unmarshal(buf, reflect.ValueOf(b.Contents)) } + +// MarshalCommand marshals a TPM command into its raw cpHash preimage format. +// The returned bytes can be directly hashed to compute cpHash. +// +// Example: +// +// cmdData, _ := MarshalCommand(myCmd) +// cpHash := sha256.Sum256(cmdData) +// +// Note: Encrypted command parameters (via sessions) are not currently supported. +// The marshaled parameters are in their unencrypted form. +func MarshalCommand[C Command[R, *R], R any](cmd C) ([]byte, error) { + cc := cmd.Command() + + names, err := cmdNames(cmd) + if err != nil { + return nil, err + } + + params, err := cmdParameters(cmd, nil) + if err != nil { + return nil, err + } + + // Build raw cpHash preimage: CommandCode {∥ Name1 {∥ Name2 {∥ Name3 }}} {∥ Parameters } + // See section 16.7 of TPM 2.0 specification, part 1. + buf := new(bytes.Buffer) + + if err := binary.Write(buf, binary.BigEndian, cc); err != nil { + return nil, fmt.Errorf("marshalling command code: %w", err) + } + + for i, name := range names { + if _, err := buf.Write(name.Buffer); err != nil { + return nil, fmt.Errorf("marshalling name %d: %w", i, err) + } + } + + if _, err := buf.Write(params); err != nil { + return nil, fmt.Errorf("marshalling parameters: %w", err) + } + + return buf.Bytes(), nil +} + +// UnmarshalCommand unmarshals a raw cpHash preimage back into a TPM command. +// The data should be the output from [MarshalCommand]. 
+// +// Example: +// +// cmdData, _ := MarshalCommand(myCmd) +// cmd, _ := UnmarshalCommand[MyCommandType](cmdData) +// +// Notes: +// - command produced from this function is not meant to be executed directly on a TPM, +// instead it is expected to be used for purposes such as auditing or inspection. +// - encrypted command parameters (via sessions) are not currently supported. +func UnmarshalCommand[C Command[R, *R], R any](data []byte) (C, error) { + var cmd C + + if data == nil { + return cmd, fmt.Errorf("data cannot be nil") + } + + buf := bytes.NewBuffer(data) + + var cc TPMCC + if err := binary.Read(buf, binary.BigEndian, &cc); err != nil { + return cmd, fmt.Errorf("unmarshalling command code: %w", err) + } + + if cc != cmd.Command() { + return cmd, fmt.Errorf("command code mismatch: expected %v, got %v", cmd.Command(), cc) + } + + expectedNames, err := cmdNames(cmd) + if err != nil { + return cmd, fmt.Errorf("getting expected names count: %w", err) + } + numNames := len(expectedNames) + + names := make([]TPM2BName, numNames) + for i := range numNames { + remaining := buf.Bytes() + if len(remaining) == 0 { + return cmd, fmt.Errorf("unexpected end of data while parsing name %d", i) + } + + nameSize, err := parseNameSize(remaining) + if err != nil { + return cmd, fmt.Errorf("parsing name %d size: %w", i, err) + } + + if len(remaining) < nameSize { + return cmd, fmt.Errorf("insufficient data for name %d: need %d bytes, have %d", i, nameSize, len(remaining)) + } + + nameBytes := make([]byte, nameSize) + if _, err := buf.Read(nameBytes); err != nil { + return cmd, fmt.Errorf("reading name %d: %w", i, err) + } + + names[i] = TPM2BName{Buffer: nameBytes} + } + + // Populate the command's handle fields from the names + if err := populateHandlesFromNames(&cmd, names); err != nil { + return cmd, err + } + + params := buf.Bytes() + + paramsBuf := bytes.NewBuffer(params) + if err := unmarshalCmdParameters(paramsBuf, &cmd, nil); err != nil { + return cmd, err + } + return cmd, nil +} + +// parseNameSize determines the size of a TPM2BName by inspecting its first bytes. +// Returns the total size in bytes for the name. +// +// Case 1: Handle-based names (4 bytes) +// - 0x0000... → PCR +// - 0x02... → HMAC Session +// - 0x03... → Policy Session +// - 0x40... → Permanent Values +// +// Case 2: Hash-based names (2 + hash_size bytes) - for all other entities +// - Format: nameAlg (2 bytes) || H_nameAlg (hash digest) +// +// See section 14 of TPM 2.0 specification, part 1. 
+func parseNameSize(buf []byte) (int, error) { + if len(buf) < 2 { + return 0, fmt.Errorf("buffer too short to parse name") + } + + firstByte := TPMHT(buf[0]) + firstTwoBytes := binary.BigEndian.Uint16(buf[0:2]) + + // Case 1: Handle-based names (4 bytes) + switch { + case firstTwoBytes == 0x0000: + // PCR handles (pattern: 0x0000XXXX) + // Must check both bytes to distinguish from hash algorithms + // that also start with 0x00 (e.g., TPMAlgSHA256 = 0x000B) + return 4, nil + case firstByte == TPMHTHMACSession: // 0x02 + return 4, nil + case firstByte == TPMHTPolicySession: // 0x03 + return 4, nil + case firstByte == TPMHTPermanent: // 0x40 + return 4, nil + } + + // Case 2: Hash-based names (nameAlg || hash) + // firstTwoBytes is the algorithm ID (0x0001 to 0x00B3) + algID := TPMIAlgHash(firstTwoBytes) + hashAlg, err := algID.Hash() + if err != nil { + return 0, fmt.Errorf("unsupported hash algorithm 0x%x in name: %w", firstTwoBytes, err) + } + + // 2 bytes for algID + hash size + return 2 + hashAlg.Size(), nil +} + +// MarshalResponse marshals a TPM response into its raw rpHash preimage format. +// The returned bytes can be directly hashed to compute rpHash. +// +// Example: +// +// rspData, _ := MarshalResponse(myCmd, myRsp) +// rpHash := sha256.Sum256(rspData) +// +// Note: Encrypted response parameters (via sessions) are not currently supported. +func MarshalResponse[C Command[R, *R], R any](cmd C, rsp *R) ([]byte, error) { + cc := cmd.Command() + + params, err := marshalRspParameters(rsp, nil) + if err != nil { + return nil, err + } + + // Build raw rpHash preimage: responseCode || commandCode || parameters + buf := new(bytes.Buffer) + + // Write responseCode (4 bytes, always 0 for successful responses) + if err := binary.Write(buf, binary.BigEndian, uint32(0)); err != nil { + return nil, fmt.Errorf("marshalling response code: %w", err) + } + + if err := binary.Write(buf, binary.BigEndian, cc); err != nil { + return nil, fmt.Errorf("marshalling command code: %w", err) + } + + if _, err := buf.Write(params); err != nil { + return nil, fmt.Errorf("marshalling parameters: %w", err) + } + + return buf.Bytes(), nil +} + +// UnmarshalResponse unmarshals a raw rpHash preimage back into a TPM response. +// The data should be the output from [MarshalResponse]. +// +// Example: +// +// rspData, _ := MarshalResponse(commandCode, myRsp) +// rsp, _ := UnmarshalResponse[MyResponseType](rspData) +// +// Notes: +// - the result from this function is expected to be used for purposes such as auditing or inspection. +// - encrypted response parameters (via sessions) are not currently supported. 
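Editor's note: for orientation, a sketch of computing a cpHash offline with the new MarshalCommand, mirroring its doc comment. ReadPublic against the owner hierarchy is chosen only because a permanent handle's Name is derivable without a TPM; the explicit type arguments are a conservative choice in case inference cannot determine the response type from the command alone:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/google/go-tpm/tpm2"
)

func main() {
	// Illustrative command: the permanent owner handle has a well-known Name,
	// so the cpHash preimage can be built without talking to a TPM.
	cmd := tpm2.ReadPublic{ObjectHandle: tpm2.TPMRHOwner}

	data, err := tpm2.MarshalCommand[tpm2.ReadPublic, tpm2.ReadPublicResponse](cmd)
	if err != nil {
		panic(err)
	}

	// data is the raw preimage: commandCode || Name(s) || parameters.
	cpHash := sha256.Sum256(data)
	fmt.Printf("cpHash (SHA-256): %x\n", cpHash)
}
```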
+func UnmarshalResponse[R any](data []byte) (*R, error) { + var rsp R + + if data == nil { + return nil, fmt.Errorf("data cannot be nil") + } + + if len(data) < 8 { + return nil, fmt.Errorf("data too short: need at least 8 bytes (responseCode + commandCode), got %d", len(data)) + } + + buf := bytes.NewBuffer(data) + + var responseCode uint32 + if err := binary.Read(buf, binary.BigEndian, &responseCode); err != nil { + return nil, fmt.Errorf("unmarshalling response code: %w", err) + } + + if responseCode != 0 { + return nil, fmt.Errorf("invalid response code: expected 0, got 0x%x", responseCode) + } + + var cc TPMCC + if err := binary.Read(buf, binary.BigEndian, &cc); err != nil { + return nil, fmt.Errorf("unmarshalling command code: %w", err) + } + + params := buf.Bytes() + + if err := rspParameters(params, nil, &rsp); err != nil { + return nil, err + } + return &rsp, nil +} diff --git a/vendor/github.com/google/go-tpm/tpm2/reflect.go b/vendor/github.com/google/go-tpm/tpm2/reflect.go index d32473eba8f..863e5b1dac1 100644 --- a/vendor/github.com/google/go-tpm/tpm2/reflect.go +++ b/vendor/github.com/google/go-tpm/tpm2/reflect.go @@ -455,6 +455,165 @@ func unmarshalArray(buf *bytes.Buffer, v reflect.Value) error { return nil } +// unmarshalStructField unmarshals a single field of a struct. +// Returns nil if successful or if the field was skipped (e.g., optional field with zero size). +func unmarshalStructField(buf *bytes.Buffer, v reflect.Value, i int) error { + fieldType := v.Type().Field(i) + fieldValue := v.Field(i) + + if hasTag(fieldType, "skip") { + return nil + } + + list := hasTag(fieldType, "list") + if list && (fieldValue.Kind() != reflect.Slice) { + return fmt.Errorf("field '%v' of struct '%v' had the 'list' tag but was not a slice", + fieldType.Name, v.Type().Name()) + } + // Slices of anything but byte/uint8 must have the 'list' tag. + if !list && (fieldValue.Kind() == reflect.Slice) && (fieldType.Type.Elem().Kind() != reflect.Uint8) { + return fmt.Errorf("field '%v' of struct '%v' was a slice of non-byte but did not have the 'list' tag", + fieldType.Name, v.Type().Name()) + } + + if hasTag(fieldType, "optional") { + // Special case: Part 3 specifies some input/output + // parameters as "optional", which means that they are + // (2B-) sized fields that can be zero-length, even if the + // enclosed type has no legal empty serialization. + // When unmarshalling an optional field, test for zero size + // and skip if empty. + if buf.Len() >= 2 { + if binary.BigEndian.Uint16(buf.Bytes()) == 0 { + // Advance the buffer past the zero size and skip to the + // next field of the struct. + buf.Next(2) + return nil + } + // If non-zero size, proceed to unmarshal the contents below. 
+ } + } + + // Handle nullable fields (for command parameters) + if fieldValue.Kind() == reflect.Uint32 && hasTag(fieldType, "nullable") { + var val uint32 + if err := binary.Read(buf, binary.BigEndian, &val); err != nil { + return fmt.Errorf("reading nullable uint32 parameter: %w", err) + } + fieldValue.SetUint(uint64(val)) + return nil + } else if fieldValue.Kind() == reflect.Uint16 && hasTag(fieldType, "nullable") { + var val uint16 + if err := binary.Read(buf, binary.BigEndian, &val); err != nil { + return fmt.Errorf("reading nullable uint16 parameter: %w", err) + } + fieldValue.SetUint(uint64(val)) + return nil + } + + sized := hasTag(fieldType, "sized") + sized8 := hasTag(fieldType, "sized8") + // If sized, unmarshal a size field first, then restrict + // unmarshalling to the given size + bufToReadFrom := buf + if sized { + var expectedSize uint16 + binary.Read(buf, binary.BigEndian, &expectedSize) + sizedBufArray := make([]byte, int(expectedSize)) + n, err := buf.Read(sizedBufArray) + if n != int(expectedSize) { + return fmt.Errorf("ran out of data reading sized parameter '%v' inside struct of type '%v'", + fieldType.Name, v.Type().Name()) + } + if err != nil { + return fmt.Errorf("error reading data for parameter '%v' inside struct of type '%v'", + fieldType.Name, v.Type().Name()) + } + bufToReadFrom = bytes.NewBuffer(sizedBufArray) + } + if sized8 { + var expectedSize uint8 + binary.Read(buf, binary.BigEndian, &expectedSize) + sizedBufArray := make([]byte, int(expectedSize)) + n, err := buf.Read(sizedBufArray) + if n != int(expectedSize) { + return fmt.Errorf("ran out of data reading sized parameter '%v' inside struct of type '%v'", + fieldType.Name, v.Type().Name()) + } + if err != nil { + return fmt.Errorf("error reading data for parameter '%v' inside struct of type '%v'", + fieldType.Name, v.Type().Name()) + } + bufToReadFrom = bytes.NewBuffer(sizedBufArray) + } + + tagName, _ := tag(fieldType, "tag") + if tagName != "" { + // Make a pass to create a map of tag values + // UInt64-valued fields with values greater than + // MaxInt64 cannot be selectors. 
+ possibleSelectors := make(map[string]int64) + for j := 0; j < v.NumField(); j++ { + switch v.Field(j).Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + possibleSelectors[v.Type().Field(j).Name] = v.Field(j).Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val := v.Field(j).Uint() + if val <= math.MaxInt64 { + possibleSelectors[v.Type().Field(j).Name] = int64(val) + } + } + } + // Check that the tagged value was present (and numeric + // and smaller than MaxInt64) + tagValue, ok := possibleSelectors[tagName] + // Don't marshal anything if the tag value was TPM_ALG_NULL + if tagValue == int64(TPMAlgNull) { + return nil + } + if !ok { + return fmt.Errorf("union tag '%v' for member '%v' of struct '%v' did not reference "+ + "a numeric field of in64-compatible value", + tagName, fieldType.Name, v.Type().Name()) + } + var uwh unmarshallableWithHint + if fieldValue.CanAddr() && fieldValue.Addr().Type().AssignableTo(reflect.TypeOf(&uwh).Elem()) { + u := fieldValue.Addr().Interface().(unmarshallableWithHint) + contents, err := u.create(tagValue) + if err != nil { + return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) + } + err = unmarshal(buf, contents) + if err != nil { + return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) + } + } else if fieldValue.Type().AssignableTo(reflect.TypeOf(&uwh).Elem()) { + u := fieldValue.Interface().(unmarshallableWithHint) + contents, err := u.create(tagValue) + if err != nil { + return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) + } + err = unmarshal(buf, contents) + if err != nil { + return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) + } + } + } else { + if err := unmarshal(bufToReadFrom, fieldValue); err != nil { + return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) + } + } + + if sized || sized8 { + if bufToReadFrom.Len() != 0 { + return fmt.Errorf("extra data at the end of sized parameter '%v' inside struct of type '%v'", + fieldType.Name, v.Type().Name()) + } + } + + return nil +} + func unmarshalStruct(buf *bytes.Buffer, v reflect.Value) error { // Check if this is a bitwise-defined structure. This requires all the // exported members to be bitwise-defined. @@ -487,133 +646,9 @@ func unmarshalStruct(buf *bytes.Buffer, v reflect.Value) error { if numBitwise > 0 { return unmarshalBitwise(buf, v) } - for i := 0; i < v.NumField(); i++ { - if hasTag(v.Type().Field(i), "skip") { - continue - } - list := hasTag(v.Type().Field(i), "list") - if list && (v.Field(i).Kind() != reflect.Slice) { - return fmt.Errorf("field '%v' of struct '%v' had the 'list' tag but was not a slice", - v.Type().Field(i).Name, v.Type().Name()) - } - // Slices of anything but byte/uint8 must have the 'list' tag. - if !list && (v.Field(i).Kind() == reflect.Slice) && (v.Type().Field(i).Type.Elem().Kind() != reflect.Uint8) { - return fmt.Errorf("field '%v' of struct '%v' was a slice of non-byte but did not have the 'list' tag", - v.Type().Field(i).Name, v.Type().Name()) - } - if hasTag(v.Type().Field(i), "optional") { - // Special case: Part 3 specifies some input/output - // parameters as "optional", which means that they are - // (2B-) sized fields that can be zero-length, even if the - // enclosed type has no legal empty serialization. - // When unmarshalling an optional field, test for zero size - // and skip if empty. 
- if buf.Len() < 2 { - if binary.BigEndian.Uint16(buf.Bytes()) == 0 { - // Advance the buffer past the zero size and skip to the - // next field of the struct. - buf.Next(2) - continue - } - // If non-zero size, proceed to unmarshal the contents below. - } - } - sized := hasTag(v.Type().Field(i), "sized") - sized8 := hasTag(v.Type().Field(i), "sized8") - // If sized, unmarshal a size field first, then restrict - // unmarshalling to the given size - bufToReadFrom := buf - if sized { - var expectedSize uint16 - binary.Read(buf, binary.BigEndian, &expectedSize) - sizedBufArray := make([]byte, int(expectedSize)) - n, err := buf.Read(sizedBufArray) - if n != int(expectedSize) { - return fmt.Errorf("ran out of data reading sized parameter '%v' inside struct of type '%v'", - v.Type().Field(i).Name, v.Type().Name()) - } - if err != nil { - return fmt.Errorf("error reading data for parameter '%v' inside struct of type '%v'", - v.Type().Field(i).Name, v.Type().Name()) - } - bufToReadFrom = bytes.NewBuffer(sizedBufArray) - } - if sized8 { - var expectedSize uint8 - binary.Read(buf, binary.BigEndian, &expectedSize) - sizedBufArray := make([]byte, int(expectedSize)) - n, err := buf.Read(sizedBufArray) - if n != int(expectedSize) { - return fmt.Errorf("ran out of data reading sized parameter '%v' inside struct of type '%v'", - v.Type().Field(i).Name, v.Type().Name()) - } - if err != nil { - return fmt.Errorf("error reading data for parameter '%v' inside struct of type '%v'", - v.Type().Field(i).Name, v.Type().Name()) - } - bufToReadFrom = bytes.NewBuffer(sizedBufArray) - } - tag, _ := tag(v.Type().Field(i), "tag") - if tag != "" { - // Make a pass to create a map of tag values - // UInt64-valued fields with values greater than - // MaxInt64 cannot be selectors. - possibleSelectors := make(map[string]int64) - for j := 0; j < v.NumField(); j++ { - switch v.Field(j).Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - possibleSelectors[v.Type().Field(j).Name] = v.Field(j).Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - val := v.Field(j).Uint() - if val <= math.MaxInt64 { - possibleSelectors[v.Type().Field(j).Name] = int64(val) - } - } - } - // Check that the tagged value was present (and numeric - // and smaller than MaxInt64) - tagValue, ok := possibleSelectors[tag] - // Don't marshal anything if the tag value was TPM_ALG_NULL - if tagValue == int64(TPMAlgNull) { - continue - } - if !ok { - return fmt.Errorf("union tag '%v' for member '%v' of struct '%v' did not reference "+ - "a numeric field of in64-compatible value", - tag, v.Type().Field(i).Name, v.Type().Name()) - } - var uwh unmarshallableWithHint - if v.Field(i).CanAddr() && v.Field(i).Addr().Type().AssignableTo(reflect.TypeOf(&uwh).Elem()) { - u := v.Field(i).Addr().Interface().(unmarshallableWithHint) - contents, err := u.create(tagValue) - if err != nil { - return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) - } - err = unmarshal(buf, contents) - if err != nil { - return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) - } - } else if v.Field(i).Type().AssignableTo(reflect.TypeOf(&uwh).Elem()) { - u := v.Field(i).Interface().(unmarshallableWithHint) - contents, err := u.create(tagValue) - if err != nil { - return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) - } - err = unmarshal(buf, contents) - if err != nil { - return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, 
v.Type(), err) - } - } - } else { - if err := unmarshal(bufToReadFrom, v.Field(i)); err != nil { - return fmt.Errorf("unmarshalling field %v of struct of type '%v', %w", i, v.Type(), err) - } - } - if sized || sized8 { - if bufToReadFrom.Len() != 0 { - return fmt.Errorf("extra data at the end of sized parameter '%v' inside struct of type '%v'", - v.Type().Field(i).Name, v.Type().Name()) - } + for i := range v.NumField() { + if err := unmarshalStructField(buf, v, i); err != nil { + return err } } return nil @@ -854,6 +889,131 @@ func marshalParameter[R any](buf *bytes.Buffer, cmd Command[R, *R], i int) error return marshal(buf, parm) } +// unmarshalParameter will deserialize the given parameter of the command from the buffer. +// Returns an error if the value is not unmarshallable or if there's insufficient data. +func unmarshalParameter[C Command[R, *R], R any](buf *bytes.Buffer, cmd *C, i int) error { + numHandles := len(taggedMembers(reflect.ValueOf(*cmd), "handle", false)) + fieldIndex := numHandles + i + if fieldIndex >= reflect.TypeOf(*cmd).NumField() { + return fmt.Errorf("invalid parameter index %v", i) + } + + // Use unmarshalStructField to handle this field with all its tags + cmdValue := reflect.ValueOf(cmd).Elem() + return unmarshalStructField(buf, cmdValue, fieldIndex) +} + +// populateHandlesFromNames populates the handle fields of a command with handles +// created from the provided names. +// +// All handle fields are populated with [UnmarshalledHandle] +func populateHandlesFromNames[C Command[R, *R], R any](cmd *C, names []TPM2BName) error { + cmdValue := reflect.ValueOf(cmd).Elem() + cmdType := reflect.TypeOf(*cmd) + + nameIdx := 0 + for i := 0; i < cmdType.NumField(); i++ { + field := cmdType.Field(i) + + if !hasTag(field, "handle") { + break + } + + // Skip anonymous handles + if hasTag(field, "anon") { + continue + } + + if nameIdx >= len(names) { + return fmt.Errorf("not enough names for handle field %d", i) + } + + handleValue := UnmarshalledHandle{ + Name: names[nameIdx], + } + + cmdValue.Field(i).Set(reflect.ValueOf(handleValue)) + nameIdx++ + } + + if nameIdx != len(names) { + return fmt.Errorf("name count mismatch: used %d names, got %d", nameIdx, len(names)) + } + return nil +} + +// unmarshalCmdParameters unmarshals the parameters area of the command from a buffer. +// This is the inverse operation of cmdParameters. +// The first parameter may be decrypted by one of the sessions if provided. 
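Editor's note: populateHandlesFromNames above fills every non-anonymous handle field with an UnmarshalledHandle (the type is added in tpm2.go later in this diff). A small sketch of its semantics — only the Name round-trips, while the handle value is a deliberate sentinel:

```go
package main

import (
	"fmt"

	"github.com/google/go-tpm/tpm2"
)

func main() {
	// A Name equal to a 4-byte permanent handle (TPM_RH_OWNER), as produced
	// by UnmarshalCommand for handle-based names.
	h := tpm2.UnmarshalledHandle{
		Name: tpm2.TPM2BName{Buffer: []byte{0x40, 0x00, 0x00, 0x01}},
	}
	fmt.Printf("%#x\n", h.HandleValue())     // 0xffffffff: sentinel, not usable on a real TPM
	fmt.Printf("%x\n", h.KnownName().Buffer) // 40000001
}
```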
+func unmarshalCmdParameters[C Command[R, *R], R any](buf *bytes.Buffer, cmd *C, sess []Session) error { + parms := taggedMembers(reflect.ValueOf(*cmd), "handle", true) + if len(parms) == 0 { + return nil + } + + // Check if we need to decrypt the first parameter + decrypted := false + for i, s := range sess { + if s.IsDecryption() { + if decrypted { + return fmt.Errorf("too many decrypt sessions") + } + // Read the first parameter's size (2 bytes for TPM2B) + if buf.Len() < 2 { + return fmt.Errorf("insufficient data for first parameter size") + } + + // Peek at the size to know how much to decrypt + var size uint16 + tempBuf := *buf + if err := binary.Read(&tempBuf, binary.BigEndian, &size); err != nil { + return fmt.Errorf("reading first parameter size: %w", err) + } + + // Read size + content + if buf.Len() < int(2+size) { + return fmt.Errorf("insufficient data for first parameter") + } + + // Extract the parameter bytes (including size prefix) + paramBytes := make([]byte, 2+size) + if _, err := buf.Read(paramBytes); err != nil { + return fmt.Errorf("reading first parameter: %w", err) + } + + // Decrypt the content (skip the 2-byte size prefix) + if err := s.Decrypt(paramBytes[2:]); err != nil { + return fmt.Errorf("decrypting with session %d: %w", i, err) + } + + // Now unmarshal the decrypted parameter + paramBuf := bytes.NewBuffer(paramBytes) + if err := unmarshalParameter(paramBuf, cmd, 0); err != nil { + return fmt.Errorf("unmarshalling first parameter: %w", err) + } + + decrypted = true + break + } + } + + // If we didn't decrypt, unmarshal the first parameter normally + if !decrypted { + if err := unmarshalParameter(buf, cmd, 0); err != nil { + return fmt.Errorf("unmarshalling first parameter: %w", err) + } + } + + // Unmarshal the rest of the parameters + for i := 1; i < len(parms); i++ { + if err := unmarshalParameter(buf, cmd, i); err != nil { + return fmt.Errorf("unmarshalling parameter %d: %w", i, err) + } + } + + return nil +} + // cmdParameters returns the parameters area of the command. // The first parameter may be encrypted by one of the sessions. func cmdParameters[R any](cmd Command[R, *R], sess []Session) ([]byte, error) { @@ -1053,6 +1213,48 @@ func rspSessions(rsp *bytes.Buffer, rc TPMRC, cc TPMCC, names []TPM2BName, parms return nil } +// marshalRspParameters marshals the parameters area of a response. +func marshalRspParameters(rspStruct any, sess []Session) ([]byte, error) { + parameters := taggedMembers(reflect.ValueOf(rspStruct).Elem(), "handle", true) + if len(parameters) == 0 { + return nil, nil + } + + var firstParm bytes.Buffer + if err := marshal(&firstParm, parameters[0]); err != nil { + return nil, fmt.Errorf("marshalling first parameter: %w", err) + } + firstParmBytes := firstParm.Bytes() + + // Encrypt the first parameter if there are any encryption sessions. + encrypted := false + for i, s := range sess { + if s.IsEncryption() { + if encrypted { + return nil, fmt.Errorf("too many encrypt sessions") + } + if len(firstParmBytes) < 2 { + return nil, fmt.Errorf("first parameter is not a tpm2b") + } + err := s.Encrypt(firstParmBytes[2:]) + if err != nil { + return nil, fmt.Errorf("encrypting with session %d: %w", i, err) + } + encrypted = true + } + } + + var result bytes.Buffer + result.Write(firstParmBytes) + // Write the rest of the parameters normally. 
+ for i := 1; i < len(parameters); i++ { + if err := marshal(&result, parameters[i]); err != nil { + return nil, fmt.Errorf("marshalling parameter %d: %w", i, err) + } + } + return result.Bytes(), nil +} + // rspParameters decrypts (if needed) the parameters area of the response // into the response structure. If there is a mismatch between the expected // and actual response structure, returns an error here. diff --git a/vendor/github.com/google/go-tpm/tpm2/tpm2.go b/vendor/github.com/google/go-tpm/tpm2/tpm2.go index 4a2ebae0239..0dbebafde3b 100644 --- a/vendor/github.com/google/go-tpm/tpm2/tpm2.go +++ b/vendor/github.com/google/go-tpm/tpm2/tpm2.go @@ -59,6 +59,32 @@ func (h AuthHandle) KnownName() *TPM2BName { return h.Handle.KnownName() } +// invalidHandleValue is the sentinel value used for handles that have been +// reconstructed from unmarshalling. +// This value is intentionally invalid to prevent accidental use with a real TPM. +const invalidHandleValue TPMHandle = 0xFFFFFFFF + +// UnmarshalledHandle represents a handle reconstructed from unmarshalling. +// This type is used for audit and inspection purposes where only the Name is +// available, not the actual TPM handle value. +// +// The HandleValue() method returns [invalidHandleValue] to prevent accidental +// use of these handles with a real TPM. +type UnmarshalledHandle struct { + Name TPM2BName +} + +// HandleValue implements the handle interface. +// Returns invalidHandleValue since unmarshalled handles don't have real TPM handle values. +func (h UnmarshalledHandle) HandleValue() uint32 { + return uint32(invalidHandleValue) +} + +// KnownName implements the handle interface. +func (h UnmarshalledHandle) KnownName() *TPM2BName { + return &h.Name +} + // Command is an interface for any TPM command, parameterized by its response // type. type Command[R any, PR *R] interface { @@ -277,7 +303,7 @@ type LoadExternalResponse struct { // See definition in Part 3, Commands, section 12.4 type ReadPublic struct { // TPM handle of an object - ObjectHandle TPMIDHObject `gotpm:"handle"` + ObjectHandle handle `gotpm:"handle"` } // Command implements the Command interface. diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE index 1409d6ab92f..bb1e9a486af 100644 --- a/vendor/github.com/hashicorp/go-version/LICENSE +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014 HashiCorp, Inc. +Copyright IBM Corp. 2014, 2025 Mozilla Public License, version 2.0 diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 4b7806cd964..83a8249f72c 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,6 +1,7 @@ # Versioning Library for Go + ![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg) -[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-version.svg)](https://pkg.go.dev/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, and verifying versions against a set of constraints. go-version @@ -12,7 +13,7 @@ Versions used with go-version must follow [SemVer](http://semver.org/). ## Installation and Usage Package documentation can be found on -[GoDoc](http://godoc.org/github.com/hashicorp/go-version). 
+[Go Reference](https://pkg.go.dev/github.com/hashicorp/go-version). Installation can be done with a normal `go get`: diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index 29bdc4d2b5d..3964da070d6 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -1,4 +1,4 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 2014, 2025 // SPDX-License-Identifier: MPL-2.0 package version @@ -8,8 +8,26 @@ import ( "regexp" "sort" "strings" + "sync" ) +var ( + constraintRegexp *regexp.Regexp + constraintRegexpOnce sync.Once +) + +func getConstraintRegexp() *regexp.Regexp { + constraintRegexpOnce.Do(func() { + // This heavy lifting only happens the first time this function is called + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + `<=|>=|!=|~>|<|>|=|`, + VersionRegexpRaw, + )) + }) + return constraintRegexp +} + // Constraint represents a single constraint for a version, such as // ">= 1.0". type Constraint struct { @@ -29,38 +47,11 @@ type Constraints []*Constraint type constraintFunc func(v, c *Version) bool -var constraintOperators map[string]constraintOperation - type constraintOperation struct { op operator f constraintFunc } -var constraintRegexp *regexp.Regexp - -func init() { - constraintOperators = map[string]constraintOperation{ - "": {op: equal, f: constraintEqual}, - "=": {op: equal, f: constraintEqual}, - "!=": {op: notEqual, f: constraintNotEqual}, - ">": {op: greaterThan, f: constraintGreaterThan}, - "<": {op: lessThan, f: constraintLessThan}, - ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, - "<=": {op: lessThanEqual, f: constraintLessThanEqual}, - "~>": {op: pessimistic, f: constraintPessimistic}, - } - - ops := make([]string, 0, len(constraintOperators)) - for k := range constraintOperators { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegexp = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - VersionRegexpRaw)) -} - // NewConstraint will parse one or more constraints from the given // constraint string. The string must be a comma-separated list of // constraints. @@ -107,7 +98,7 @@ func (cs Constraints) Check(v *Version) bool { // to '>0.2' it is *NOT* treated as equal. // // Missing operator is treated as equal to '=', whitespaces -// are ignored and constraints are sorted before comaparison. +// are ignored and constraints are sorted before comparison. 
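+//
+// For example, the sets ">0.1, >0.2" and ">0.2,>0.1" compare as equal:
+// whitespace is ignored and both sort to the same constraint list.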
func (cs Constraints) Equals(c Constraints) bool { if len(cs) != len(c) { return false @@ -176,9 +167,9 @@ func (c *Constraint) String() string { } func parseSingle(v string) (*Constraint, error) { - matches := constraintRegexp.FindStringSubmatch(v) + matches := getConstraintRegexp().FindStringSubmatch(v) if matches == nil { - return nil, fmt.Errorf("Malformed constraint: %s", v) + return nil, fmt.Errorf("malformed constraint: %s", v) } check, err := NewVersion(matches[2]) @@ -186,7 +177,25 @@ func parseSingle(v string) (*Constraint, error) { return nil, err } - cop := constraintOperators[matches[1]] + var cop constraintOperation + switch matches[1] { + case "=": + cop = constraintOperation{op: equal, f: constraintEqual} + case "!=": + cop = constraintOperation{op: notEqual, f: constraintNotEqual} + case ">": + cop = constraintOperation{op: greaterThan, f: constraintGreaterThan} + case "<": + cop = constraintOperation{op: lessThan, f: constraintLessThan} + case ">=": + cop = constraintOperation{op: greaterThanEqual, f: constraintGreaterThanEqual} + case "<=": + cop = constraintOperation{op: lessThanEqual, f: constraintLessThanEqual} + case "~>": + cop = constraintOperation{op: pessimistic, f: constraintPessimistic} + default: + cop = constraintOperation{op: equal, f: constraintEqual} + } return &Constraint{ f: cop.f, diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 7c683c2813a..17b29732ee1 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -1,23 +1,39 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 2014, 2025 // SPDX-License-Identifier: MPL-2.0 package version import ( - "bytes" "database/sql/driver" "fmt" "regexp" "strconv" "strings" + "sync" ) // The compiled regular expression used to test the validity of a version. var ( - versionRegexp *regexp.Regexp - semverRegexp *regexp.Regexp + versionRegexp *regexp.Regexp + versionRegexpOnce sync.Once + semverRegexp *regexp.Regexp + semverRegexpOnce sync.Once ) +func getVersionRegexp() *regexp.Regexp { + versionRegexpOnce.Do(func() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + }) + return versionRegexp +} + +func getSemverRegexp() *regexp.Regexp { + semverRegexpOnce.Do(func() { + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") + }) + return semverRegexp +} + // The raw regular expression string used for testing the validity // of a version. const ( @@ -42,28 +58,23 @@ type Version struct { original string } -func init() { - versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") - semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") -} - // NewVersion parses the given version and returns a new // Version. 
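+//
+// NewVersion is lenient: partial versions parse and canonicalize via
+// String() (e.g. "1.0" becomes "1.0.0"), whereas NewSemver below rejects
+// input that is not strict SemVer.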
func NewVersion(v string) (*Version, error) { - return newVersion(v, versionRegexp) + return newVersion(v, getVersionRegexp()) } // NewSemver parses the given version and returns a new // Version that adheres strictly to SemVer specs // https://semver.org/ func NewSemver(v string) (*Version, error) { - return newVersion(v, semverRegexp) + return newVersion(v, getSemverRegexp()) } func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { matches := pattern.FindStringSubmatch(v) if matches == nil { - return nil, fmt.Errorf("Malformed version: %s", v) + return nil, fmt.Errorf("malformed version: %s", v) } segmentsStr := strings.Split(matches[1], ".") segments := make([]int64, len(segmentsStr)) @@ -71,7 +82,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { val, err := strconv.ParseInt(str, 10, 64) if err != nil { return nil, fmt.Errorf( - "Error parsing version: %s", err) + "error parsing version: %s", err) } segments[i] = val @@ -174,7 +185,7 @@ func (v *Version) Compare(other *Version) int { } else if lhs < rhs { return -1 } - // Otherwis, rhs was > lhs, they're not equal + // Otherwise, rhs was > lhs, they're not equal return 1 } @@ -382,22 +393,29 @@ func (v *Version) Segments64() []int64 { // missing parts (1.0 => 1.0.0) will be made into a canonicalized form // as shown in the parenthesized examples. func (v *Version) String() string { - var buf bytes.Buffer - fmtParts := make([]string, len(v.segments)) + return string(v.bytes()) +} + +func (v *Version) bytes() []byte { + var buf []byte for i, s := range v.segments { - // We can ignore err here since we've pre-parsed the values in segments - str := strconv.FormatInt(s, 10) - fmtParts[i] = str + if i > 0 { + buf = append(buf, '.') + } + buf = strconv.AppendInt(buf, s, 10) } - fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) + buf = append(buf, '-') + buf = append(buf, v.pre...) } + if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) + buf = append(buf, '+') + buf = append(buf, v.metadata...) } - return buf.String() + return buf } // Original returns the original parsed version as-is, including any diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go index 83547fe13d6..11bc8b1c56c 100644 --- a/vendor/github.com/hashicorp/go-version/version_collection.go +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -1,4 +1,4 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 2014, 2025 // SPDX-License-Identifier: MPL-2.0 package version diff --git a/vendor/github.com/knadh/koanf/v2/README.md b/vendor/github.com/knadh/koanf/v2/README.md index 3130a2870ed..c94dbfbf4b7 100644 --- a/vendor/github.com/knadh/koanf/v2/README.md +++ b/vendor/github.com/knadh/koanf/v2/README.md @@ -52,8 +52,8 @@ go get -u github.com/knadh/koanf/parsers/toml ### Concepts -- `koanf.Provider` is a generic interface that provides configuration, for example, from files, environment variables, HTTP sources, or anywhere. The configuration can either be raw bytes that a parser can parse, or it can be a nested `map[string]interface{}` that can be directly loaded. -- `koanf.Parser` is a generic interface that takes raw bytes, parses, and returns a nested `map[string]interface{}`. For example, JSON and YAML parsers. +- `koanf.Provider` is a generic interface that provides configuration, for example, from files, environment variables, HTTP sources, or anywhere. 
The configuration can either be raw bytes that a parser can parse, or it can be a nested `map[string]any` that can be directly loaded.
+- `koanf.Parser` is a generic interface that takes raw bytes, parses, and returns a nested `map[string]any`. For example, JSON and YAML parsers.
 - Once loaded into koanf, configuration are values queried by a delimited key path syntax. eg: `app.server.port`. Any delimiter can be chosen.
 - Configuration from multiple sources can be loaded and merged into a koanf instance, for example, load from a file first and override certain values with flags from the command line.
@@ -133,7 +133,7 @@ func main() {
 	// Watch the file and get a callback on change. The callback can do whatever,
 	// like re-load the configuration.
 	// File provider always returns a nil `event`.
-	f.Watch(func(event interface{}, err error) {
+	f.Watch(func(event any, err error) {
 		if err != nil {
 			log.Printf("watch error: %v", err)
 			return
@@ -430,7 +430,7 @@ func main() {
 
 #### Reading from nested maps
 
-The bundled `confmap` provider takes a `map[string]interface{}` that can be loaded into a koanf instance.
+The bundled `confmap` provider takes a `map[string]any` that can be loaded into a koanf instance.
 
 ```go
 package main
@@ -453,7 +453,7 @@ func main() {
 	// Load default values using the confmap provider.
 	// We provide a flat map with the "." delimiter.
 	// A nested map can be loaded by setting the delimiter to an empty string "".
-	k.Load(confmap.Provider(map[string]interface{}{
+	k.Load(confmap.Provider(map[string]any{
 		"parent1.name": "Default Name",
 		"parent3.name": "New name here",
 	}, "."), nil)
@@ -596,11 +596,11 @@ For example: merging JSON and YAML will most likely fail because JSON treats int
 
 ### Custom Providers and Parsers
 
-A Provider returns a nested `map[string]interface{}` config that can be loaded directly into koanf with `koanf.Load()` or it can return raw bytes that can be parsed with a Parser (again, loaded using `koanf.Load()`. Writing Providers and Parsers are easy. See the bundled implementations in the [providers](https://github.com/knadh/koanf/tree/master/providers) and [parsers](https://github.com/knadh/koanf/tree/master/parsers) directories.
+A Provider returns a nested `map[string]any` config that can be loaded directly into koanf with `koanf.Load()`, or it can return raw bytes that can be parsed with a Parser (again, loaded using `koanf.Load()`). Writing Providers and Parsers is easy. See the bundled implementations in the [providers](https://github.com/knadh/koanf/tree/master/providers) and [parsers](https://github.com/knadh/koanf/tree/master/parsers) directories.
 
 ### Custom merge strategies
 
-By default, when merging two config sources using `Load()`, koanf recursively merges keys of nested maps (`map[string]interface{}`),
+By default, when merging two config sources using `Load()`, koanf recursively merges keys of nested maps (`map[string]any`),
 while static values are overwritten (slices, strings, etc).
 This behaviour can be changed by providing a custom merge function with the `WithMergeFunc` option.
```go @@ -630,7 +630,7 @@ func main() { } jsonPath := "mock/mock.json" - if err := k.Load(file.Provider(jsonPath), json.Parser(), koanf.WithMergeFunc(func(src, dest map[string]interface{}) error { + if err := k.Load(file.Provider(jsonPath), json.Parser(), koanf.WithMergeFunc(func(src, dest map[string]any) error { // Your custom logic, copying values from src into dst return nil })); err != nil { @@ -654,8 +654,8 @@ Install with `go get -u github.com/knadh/koanf/providers/$provider` | basicflag | `basicflag.Provider(f *flag.FlagSet, delim string)` | Takes a stdlib `flag.FlagSet` | | posflag | `posflag.Provider(f *pflag.FlagSet, delim string)` | Takes an `spf13/pflag.FlagSet` (advanced POSIX compatible flags with multiple types) and provides a nested config map based on delim. | | env/v2 | `env.Provider(prefix, delim string, f func(s string) string)` | Takes an optional prefix to filter env variables by, an optional function that takes and returns a string to transform env variables, and returns a nested config map based on delim. | -| confmap | `confmap.Provider(mp map[string]interface{}, delim string)` | Takes a premade `map[string]interface{}` conf map. If delim is provided, the keys are assumed to be flattened, thus unflattened using delim. | -| structs | `structs.Provider(s interface{}, tag string)` | Takes a struct and struct tag. | +| confmap | `confmap.Provider(mp map[string]any, delim string)` | Takes a premade `map[string]any` conf map. If delim is provided, the keys are assumed to be flattened, thus unflattened using delim. | +| structs | `structs.Provider(s any, tag string)` | Takes a struct and struct tag. | | s3 | `s3.Provider(s3.S3Config{})` | Takes a s3 config struct. | | rawbytes | `rawbytes.Provider(b []byte)` | Takes a raw `[]byte` slice to be parsed with a koanf.Parser | | vault/v2 | `vault.Provider(vault.Config{})` | Hashicorp Vault provider | diff --git a/vendor/github.com/knadh/koanf/v2/getters.go b/vendor/github.com/knadh/koanf/v2/getters.go index 266230f7470..6a50ac37c2e 100644 --- a/vendor/github.com/knadh/koanf/v2/getters.go +++ b/vendor/github.com/knadh/koanf/v2/getters.go @@ -51,7 +51,7 @@ func (ko *Koanf) Int64s(path string) []int64 { out = append(out, i) } return out - case []interface{}: + case []any: out = make([]int64, 0, len(v)) for _, vi := range v { i, err := toInt64(vi) @@ -91,7 +91,7 @@ func (ko *Koanf) Int64Map(path string) map[string]int64 { return out } - mp, ok := o.(map[string]interface{}) + mp, ok := o.(map[string]any) if !ok { return out } @@ -158,7 +158,7 @@ func (ko *Koanf) Ints(path string) []int { out = append(out, int(vi)) } return out - case []interface{}: + case []any: out = make([]int, 0, len(v)) for _, vi := range v { i, err := toInt64(vi) @@ -243,7 +243,7 @@ func (ko *Koanf) Float64s(path string) []float64 { switch v := o.(type) { case []float64: return v - case []interface{}: + case []any: out = make([]float64, 0, len(v)) for _, vi := range v { i, err := toFloat64(vi) @@ -283,7 +283,7 @@ func (ko *Koanf) Float64Map(path string) map[string]float64 { return out } - mp, ok := o.(map[string]interface{}) + mp, ok := o.(map[string]any) if !ok { return out } @@ -402,7 +402,7 @@ func (ko *Koanf) Strings(path string) []string { var out []string switch v := o.(type) { - case []interface{}: + case []any: out = make([]string, 0, len(v)) for _, u := range v { if s, ok := u.(string); ok { @@ -449,7 +449,7 @@ func (ko *Koanf) StringMap(path string) map[string]string { for k, v := range mp { out[k] = v } - case map[string]interface{}: + case 
map[string]any: out = make(map[string]string, len(mp)) for k, v := range mp { switch s := v.(type) { @@ -493,7 +493,7 @@ func (ko *Koanf) StringsMap(path string) map[string][]string { for k, v := range mp { out[k] = append(out[k], v...) } - case map[string][]interface{}: + case map[string][]any: out = make(map[string][]string, len(mp)) for k, v := range mp { for _, v := range v { @@ -505,13 +505,13 @@ func (ko *Koanf) StringsMap(path string) map[string][]string { } } } - case map[string]interface{}: + case map[string]any: out = make(map[string][]string, len(mp)) for k, v := range mp { switch s := v.(type) { case []string: out[k] = append(out[k], s...) - case []interface{}: + case []any: for _, v := range s { switch sv := v.(type) { case string: @@ -578,7 +578,7 @@ func (ko *Koanf) Bools(path string) []bool { var out []bool switch v := o.(type) { - case []interface{}: + case []any: out = make([]bool, 0, len(v)) for _, u := range v { b, err := toBool(u) @@ -616,7 +616,7 @@ func (ko *Koanf) BoolMap(path string) map[string]bool { return out } - mp, ok := o.(map[string]interface{}) + mp, ok := o.(map[string]any) if !ok { return out } diff --git a/vendor/github.com/knadh/koanf/v2/go.work.sum b/vendor/github.com/knadh/koanf/v2/go.work.sum index e19681eab3b..f6f501f9a77 100644 --- a/vendor/github.com/knadh/koanf/v2/go.work.sum +++ b/vendor/github.com/knadh/koanf/v2/go.work.sum @@ -602,6 +602,7 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -618,6 +619,7 @@ golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -633,6 +635,9 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= @@ -649,6 +654,8 @@ golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -668,9 +675,11 @@ golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= @@ -679,12 +688,16 @@ golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -699,6 +712,8 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= diff --git a/vendor/github.com/knadh/koanf/v2/interfaces.go b/vendor/github.com/knadh/koanf/v2/interfaces.go index ba69a2443ae..3f99ef8c7ac 100644 --- a/vendor/github.com/knadh/koanf/v2/interfaces.go +++ b/vendor/github.com/knadh/koanf/v2/interfaces.go @@ -7,14 +7,14 @@ type Provider interface { // with a Parser. ReadBytes() ([]byte, error) - // Read returns the parsed configuration as a nested map[string]interface{}. + // Read returns the parsed configuration as a nested map[string]any. // It is important to note that the string keys should not be flat delimited // keys like `parent.child.key`, but nested like `{parent: {child: {key: 1}}}`. - Read() (map[string]interface{}, error) + Read() (map[string]any, error) } // Parser represents a configuration format parser. type Parser interface { - Unmarshal([]byte) (map[string]interface{}, error) - Marshal(map[string]interface{}) ([]byte, error) + Unmarshal([]byte) (map[string]any, error) + Marshal(map[string]any) ([]byte, error) } diff --git a/vendor/github.com/knadh/koanf/v2/koanf.go b/vendor/github.com/knadh/koanf/v2/koanf.go index 66fae73b539..f7475d75171 100644 --- a/vendor/github.com/knadh/koanf/v2/koanf.go +++ b/vendor/github.com/knadh/koanf/v2/koanf.go @@ -16,8 +16,8 @@ import ( // Koanf is the configuration apparatus. type Koanf struct { - confMap map[string]interface{} - confMapFlat map[string]interface{} + confMap map[string]any + confMapFlat map[string]any keyMap KeyMap conf Conf mu sync.RWMutex @@ -79,20 +79,20 @@ func New(delim string) *Koanf { // NewWithConf returns a new instance of Koanf based on the Conf. func NewWithConf(conf Conf) *Koanf { return &Koanf{ - confMap: make(map[string]interface{}), - confMapFlat: make(map[string]interface{}), + confMap: make(map[string]any), + confMapFlat: make(map[string]any), keyMap: make(KeyMap), conf: conf, } } -// Load takes a Provider that either provides a parsed config map[string]interface{} +// Load takes a Provider that either provides a parsed config map[string]any // in which case pa (Parser) can be nil, or raw bytes to be parsed, where a Parser // can be provided to parse. Additionally, options can be passed which modify the // load behavior, such as passing a custom merge function. func (ko *Koanf) Load(p Provider, pa Parser, opts ...Option) error { var ( - mp map[string]interface{} + mp map[string]any err error ) @@ -151,7 +151,7 @@ func (ko *Koanf) KeyMap() KeyMap { // All returns a map of all flattened key paths and their values. // Note that it uses maps.Copy to create a copy that uses // json.Marshal which changes the numeric types to float64. 
-func (ko *Koanf) All() map[string]interface{} { +func (ko *Koanf) All() map[string]any { ko.mu.RLock() defer ko.mu.RUnlock() return maps.Copy(ko.confMapFlat) @@ -160,7 +160,7 @@ func (ko *Koanf) All() map[string]interface{} { // Raw returns a copy of the full raw conf map. // Note that it uses maps.Copy to create a copy that uses // json.Marshal which changes the numeric types to float64. -func (ko *Koanf) Raw() map[string]interface{} { +func (ko *Koanf) Raw() map[string]any { ko.mu.RLock() defer ko.mu.RUnlock() return maps.Copy(ko.confMap) @@ -193,10 +193,10 @@ func (ko *Koanf) Print() { // instance with the config map `sub.a.b` where everything above // `parent.child` are cut out. func (ko *Koanf) Cut(path string) *Koanf { - out := make(map[string]interface{}) + out := make(map[string]any) // Cut only makes sense if the requested key path is a map. - if v, ok := ko.Get(path).(map[string]interface{}); ok { + if v, ok := ko.Get(path).(map[string]any); ok { out = v } @@ -227,7 +227,7 @@ func (ko *Koanf) MergeAt(in *Koanf, path string) error { } // Unflatten the config map with the given key path. - n := maps.Unflatten(map[string]interface{}{ + n := maps.Unflatten(map[string]any{ path: in.Raw(), }, ko.conf.Delim) @@ -235,9 +235,9 @@ func (ko *Koanf) MergeAt(in *Koanf, path string) error { } // Set sets the value at a specific key. -func (ko *Koanf) Set(key string, val interface{}) error { +func (ko *Koanf) Set(key string, val any) error { // Unflatten the config map with the given key path. - n := maps.Unflatten(map[string]interface{}{ + n := maps.Unflatten(map[string]any{ key: val, }, ko.conf.Delim) @@ -254,14 +254,14 @@ func (ko *Koanf) Marshal(p Parser) ([]byte, error) { // the mapstructure lib. If no path is specified, the whole map is unmarshalled. // `koanf` is the struct field tag used to match field names. To customize, // use UnmarshalWithConf(). It uses the mitchellh/mapstructure package. -func (ko *Koanf) Unmarshal(path string, o interface{}) error { +func (ko *Koanf) Unmarshal(path string, o any) error { return ko.UnmarshalWithConf(path, o, UnmarshalConf{}) } // UnmarshalWithConf is like Unmarshal but takes configuration params in UnmarshalConf. // See mitchellh/mapstructure's DecoderConfig for advanced customization // of the unmarshal behaviour. -func (ko *Koanf) UnmarshalWithConf(path string, o interface{}, c UnmarshalConf) error { +func (ko *Koanf) UnmarshalWithConf(path string, o any, c UnmarshalConf) error { if c.DecoderConfig == nil { c.DecoderConfig = &mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( @@ -288,7 +288,7 @@ func (ko *Koanf) UnmarshalWithConf(path string, o interface{}, c UnmarshalConf) // Unmarshal using flat key paths. mp := ko.Get(path) if c.FlatPaths { - if f, ok := mp.(map[string]interface{}); ok { + if f, ok := mp.(map[string]any); ok { fmp, _ := maps.Flatten(f, nil, ko.conf.Delim) mp = fmp } @@ -306,8 +306,8 @@ func (ko *Koanf) Delete(path string) { // No path. Erase the entire map. if path == "" { - ko.confMap = make(map[string]interface{}) - ko.confMapFlat = make(map[string]interface{}) + ko.confMap = make(map[string]any) + ko.confMapFlat = make(map[string]any) ko.keyMap = make(KeyMap) return } @@ -324,9 +324,9 @@ func (ko *Koanf) Delete(path string) { ko.keyMap = populateKeyParts(ko.keyMap, ko.conf.Delim) } -// Get returns the raw, uncast interface{} value of a given key path +// Get returns the raw, uncast any value of a given key path // in the config map. If the key path does not exist, nil is returned. 
-func (ko *Koanf) Get(path string) interface{} {
+func (ko *Koanf) Get(path string) any {
 	// No path. Return the whole conf map.
 	if path == "" {
 		return ko.Raw()
@@ -349,19 +349,26 @@
 	switch v := res.(type) {
 	case int, int8, int16, int32, int64, float32, float64, string, bool:
 		return v
-	case map[string]interface{}:
+	case map[string]any:
 		return maps.Copy(v)
+	case nil:
+		return nil
+	}
+
+	// Skip nil pointers before copying.
+	if rv := reflect.ValueOf(res); rv.Kind() == reflect.Ptr && rv.IsNil() {
+		return res
 	}
 
 	out, _ := copystructure.Copy(&res)
-	if ptrOut, ok := out.(*interface{}); ok {
+	if ptrOut, ok := out.(*any); ok {
 		return *ptrOut
 	}
 	return out
 }
 
 // Slices returns a list of Koanf instances constructed out of a
-// []map[string]interface{} interface at the given path.
+// []map[string]any interface at the given path.
 func (ko *Koanf) Slices(path string) []*Koanf {
 	out := []*Koanf{}
 	if path == "" {
@@ -369,13 +376,13 @@ func (ko *Koanf) Slices(path string) []*Koanf {
 	}
 
 	// Does the path exist?
-	sl, ok := ko.Get(path).([]interface{})
+	sl, ok := ko.Get(path).([]any)
 	if !ok {
 		return out
 	}
 
 	for _, s := range sl {
-		mp, ok := s.(map[string]interface{})
+		mp, ok := s.(map[string]any)
 		if !ok {
 			continue
 		}
@@ -408,7 +415,7 @@ func (ko *Koanf) MapKeys(path string) []string {
 		return out
 	}
 
-	mp, ok := o.(map[string]interface{})
+	mp, ok := o.(map[string]any)
 	if !ok {
 		return out
 	}
@@ -425,7 +432,7 @@ func (ko *Koanf) Delim() string {
 	return ko.conf.Delim
 }
 
-func (ko *Koanf) merge(c map[string]interface{}, opts *options) error {
+func (ko *Koanf) merge(c map[string]any, opts *options) error {
 	ko.mu.Lock()
 	defer ko.mu.Unlock()
@@ -453,7 +460,7 @@ func (ko *Koanf) merge(c map[string]interface{}, opts *options) error {
 // converts and returns int64. If it's any other type,
 // forces it to a string and attempts to do a strconv.Atoi
 // to get an integer out.
-func toInt64(v interface{}) (int64, error) {
+func toInt64(v any) (int64, error) {
 	switch i := v.(type) {
 	case int:
 		return int64(i), nil
@@ -476,10 +483,10 @@ func toInt64(v interface{}) (int64, error) {
 	return int64(f), nil
}
 
-// toInt64 takes a `v interface{}` value and if it is a float type,
+// toFloat64 takes a `v any` value and if it is a float type,
 // converts and returns a `float64`. If it's any other type, forces it to a
 // string and attempts to get a float out using `strconv.ParseFloat`.
-func toFloat64(v interface{}) (float64, error) {
+func toFloat64(v any) (float64, error) {
 	switch i := v.(type) {
 	case float32:
 		return float64(i), nil
@@ -499,7 +506,7 @@ func toFloat64(v interface{}) (float64, error) {
 // toBool takes an interface value and if it is a bool type,
 // returns it. If it's any other type, forces it to a string and attempts
 // to parse it as a bool using strconv.ParseBool.
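+//
+// For example, toBool(true), toBool("true"), and toBool(1) should all yield
+// true, since non-bool values fall back to strconv.ParseBool on their
+// string form.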
-func toBool(v interface{}) (bool, error) { +func toBool(v any) (bool, error) { if b, ok := v.(bool); ok { return b, nil } @@ -545,8 +552,8 @@ func textUnmarshalerHookFunc() mapstructure.DecodeHookFuncType { return func( f reflect.Type, t reflect.Type, - data interface{}, - ) (interface{}, error) { + data any, + ) (any, error) { if f.Kind() != reflect.String { return data, nil } diff --git a/vendor/github.com/knadh/koanf/v2/options.go b/vendor/github.com/knadh/koanf/v2/options.go index 63cea203e66..e53579749f7 100644 --- a/vendor/github.com/knadh/koanf/v2/options.go +++ b/vendor/github.com/knadh/koanf/v2/options.go @@ -2,7 +2,7 @@ package koanf // options contains options to modify the behavior of Koanf.Load. type options struct { - merge func(a, b map[string]interface{}) error + merge func(a, b map[string]any) error } // newOptions creates a new options instance. @@ -26,7 +26,7 @@ func (o *options) apply(opts []Option) { // If unset, the default merge function is used. // // The merge function is expected to merge map src into dest (left to right). -func WithMergeFunc(merge func(src, dest map[string]interface{}) error) Option { +func WithMergeFunc(merge func(src, dest map[string]any) error) Option { return func(o *options) { o.merge = merge } diff --git a/vendor/github.com/lufia/plan9stats/README.md b/vendor/github.com/lufia/plan9stats/README.md index 04bdcef733d..70e5386c874 100644 --- a/vendor/github.com/lufia/plan9stats/README.md +++ b/vendor/github.com/lufia/plan9stats/README.md @@ -3,11 +3,8 @@ A module for retrieving statistics of Plan 9 [![GoDev][godev-image]][godev-url] [![Actions Status][actions-image]][actions-url] -[![Coverage Status][coveralls-image]][coveralls-url] [godev-image]: https://pkg.go.dev/badge/github.com/lufia/plan9stats [godev-url]: https://pkg.go.dev/github.com/lufia/plan9stats [actions-image]: https://github.com/lufia/plan9stats/workflows/Test/badge.svg?branch=main [actions-url]: https://github.com/lufia/plan9stats/actions?workflow=Test -[coveralls-image]: https://coveralls.io/repos/github/lufia/plan9stats/badge.svg -[coveralls-url]: https://coveralls.io/github/lufia/plan9stats diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go index 2d1d45feb36..0956f34f246 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -82,6 +82,8 @@ func Put(buf []byte) { blockPool4M.Put((*[Block4Mb]byte)(buf[:c])) case Block8Mb: blockPool8M.Put((*[Block8Mb]byte)(buf[:c])) + case 0: + // Allow "returning" an empty buffer. default: panic(fmt.Errorf("invalid block size %d", c)) } diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go index cc0aa5876d1..2628eb9c53d 100644 --- a/vendor/github.com/pierrec/lz4/v4/reader.go +++ b/vendor/github.com/pierrec/lz4/v4/reader.go @@ -63,7 +63,7 @@ func (r *Reader) Apply(options ...Option) (err error) { return } -// Size returns the size of the underlying uncompressed data, if set in the stream. +// Size returns the size of the current frame's uncompressed data, if set in the stream. func (r *Reader) Size() int { switch r.state.state { case readState, closedState: @@ -217,10 +217,8 @@ func (r *Reader) read(buf []byte) (int, error) { // initial state from NewReader, but instead reading from reader. // No access to reader is performed. 
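+//
+// The nil check that used to guard lz4block.Put here is no longer needed:
+// Put now treats a zero-capacity buffer as a no-op (the new "case 0" in
+// blocks.go above), so Reset can return r.data unconditionally.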
func (r *Reader) Reset(reader io.Reader) { - if r.data != nil { - lz4block.Put(r.data) - r.data = nil - } + lz4block.Put(r.data) + r.data = nil r.frame.Reset(r.num) r.state.reset() r.src = reader @@ -267,24 +265,19 @@ func (r *Reader) WriteTo(w io.Writer) (n int64, err error) { switch err { case nil: case lz4errors.ErrEndOfStream: - // Read Checksum. err = r.frame.CloseR(r.src) if err != nil { return } - - //Check for new stream. + // Check for a new stream. r.Reset(r.src) if err = r.init(); r.state.next(err) { - if err == io.EOF { err = nil } - return } - goto read case io.EOF: err = r.frame.CloseR(r.src) diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go index 1b0a3cbfd57..532316eeca3 100644 --- a/vendor/github.com/pierrec/lz4/v4/state.go +++ b/vendor/github.com/pierrec/lz4/v4/state.go @@ -38,11 +38,15 @@ func (s *_State) reset() { s.err = nil } -// next sets the state to the next one unless it is passed a non nil error. -// It returns whether or not it is in error. +// next sets the state to the next one unless it is passed a non-nil error. +// It returns whether it is in error. func (s *_State) next(err error) bool { if err != nil { - s.err = fmt.Errorf("%s: %w", s.state, err) + if errors.Is(err, io.EOF) { + s.err = io.EOF + } else { + s.err = fmt.Errorf("%s: %w", s.state, err) + } s.state = errorState return true } @@ -61,10 +65,12 @@ func (s *_State) check(errp *error) { return } if err := *errp; err != nil { - s.err = fmt.Errorf("%w[%s]", err, s.state) - if !errors.Is(err, io.EOF) { - s.state = errorState + if errors.Is(err, io.EOF) { + s.err = io.EOF + } else { + s.err = fmt.Errorf("%w[%s]", err, s.state) } + s.state = errorState } } diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go index 4358adee109..b2e87d70a1c 100644 --- a/vendor/github.com/pierrec/lz4/v4/writer.go +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -176,10 +176,8 @@ func (w *Writer) Close() error { } err := w.frame.CloseW(w.src, w.num) // It is now safe to free the buffer. 
- if w.data != nil { - lz4block.Put(w.data) - w.data = nil - } + lz4block.Put(w.data) + w.data = nil return err } diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go index c61a470fba8..d3b6dbc5329 100644 --- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go @@ -61,17 +61,17 @@ func Times(percpu bool) ([]TimesStat, error) { } func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { - lib, err := common.NewLibrary(common.System) + sys, err := common.NewSystemLib() if err != nil { return nil, err } - defer lib.Close() + defer sys.Close() if percpu { - return perCPUTimes(lib) + return perCPUTimes(sys) } - return allCPUTimes(lib) + return allCPUTimes(sys) } // Returns only one CPUInfoStat on FreeBSD @@ -138,16 +138,12 @@ func CountsWithContext(_ context.Context, logical bool) (int, error) { return int(count), nil } -func perCPUTimes(machLib *common.Library) ([]TimesStat, error) { - machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) - machTaskSelf := common.GetFunc[common.MachTaskSelfFunc](machLib, common.MachTaskSelfSym) - hostProcessorInfo := common.GetFunc[common.HostProcessorInfoFunc](machLib, common.HostProcessorInfoSym) - vmDeallocate := common.GetFunc[common.VMDeallocateFunc](machLib, common.VMDeallocateSym) - +func perCPUTimes(sys *common.SystemLib) ([]TimesStat, error) { var count, ncpu uint32 var cpuload *hostCpuLoadInfoData - status := hostProcessorInfo(machHostSelf(), processorCpuLoadInfo, &ncpu, uintptr(unsafe.Pointer(&cpuload)), &count) + status := sys.HostProcessorInfo(sys.MachHostSelf(), processorCpuLoadInfo, + &ncpu, uintptr(unsafe.Pointer(&cpuload)), &count) if status != common.KERN_SUCCESS { return nil, fmt.Errorf("host_processor_info error=%d", status) @@ -157,7 +153,7 @@ func perCPUTimes(machLib *common.Library) ([]TimesStat, error) { return nil, errors.New("host_processor_info returned nil cpuload") } - defer vmDeallocate(machTaskSelf(), uintptr(unsafe.Pointer(cpuload)), uintptr(ncpu)) + defer sys.VMDeallocate(sys.MachTaskSelf(), uintptr(unsafe.Pointer(cpuload)), uintptr(ncpu)) ret := []TimesStat{} loads := unsafe.Slice(cpuload, ncpu) @@ -170,21 +166,17 @@ func perCPUTimes(machLib *common.Library) ([]TimesStat, error) { Nice: float64(loads[i].cpuTicks[cpuStateNice]) / ClocksPerSec, Idle: float64(loads[i].cpuTicks[cpuStateIdle]) / ClocksPerSec, } - ret = append(ret, c) } return ret, nil } -func allCPUTimes(machLib *common.Library) ([]TimesStat, error) { - machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) - hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) - +func allCPUTimes(sys *common.SystemLib) ([]TimesStat, error) { var cpuload hostCpuLoadInfoData count := uint32(cpuStateMax) - status := hostStatistics(machHostSelf(), common.HOST_CPU_LOAD_INFO, + status := sys.HostStatistics(sys.MachHostSelf(), common.HOST_CPU_LOAD_INFO, uintptr(unsafe.Pointer(&cpuload)), &count) if status != common.KERN_SUCCESS { diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go index 8e69d7cb138..2effcadf8da 100644 --- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go @@ -13,55 +13,43 @@ import ( // https://github.com/shoenig/go-m1cpu/blob/v0.1.6/cpu.go func 
getFrequency() (float64, error) { - ioKit, err := common.NewLibrary(common.IOKit) + iokit, err := common.NewIOKitLib() if err != nil { return 0, err } - defer ioKit.Close() + defer iokit.Close() - coreFoundation, err := common.NewLibrary(common.CoreFoundation) + corefoundation, err := common.NewCoreFoundationLib() if err != nil { return 0, err } - defer coreFoundation.Close() + defer corefoundation.Close() - ioServiceMatching := common.GetFunc[common.IOServiceMatchingFunc](ioKit, common.IOServiceMatchingSym) - ioServiceGetMatchingServices := common.GetFunc[common.IOServiceGetMatchingServicesFunc](ioKit, common.IOServiceGetMatchingServicesSym) - ioIteratorNext := common.GetFunc[common.IOIteratorNextFunc](ioKit, common.IOIteratorNextSym) - ioRegistryEntryGetName := common.GetFunc[common.IORegistryEntryGetNameFunc](ioKit, common.IORegistryEntryGetNameSym) - ioRegistryEntryCreateCFProperty := common.GetFunc[common.IORegistryEntryCreateCFPropertyFunc](ioKit, common.IORegistryEntryCreateCFPropertySym) - ioObjectRelease := common.GetFunc[common.IOObjectReleaseFunc](ioKit, common.IOObjectReleaseSym) - - cfStringCreateWithCString := common.GetFunc[common.CFStringCreateWithCStringFunc](coreFoundation, common.CFStringCreateWithCStringSym) - cfDataGetLength := common.GetFunc[common.CFDataGetLengthFunc](coreFoundation, common.CFDataGetLengthSym) - cfDataGetBytePtr := common.GetFunc[common.CFDataGetBytePtrFunc](coreFoundation, common.CFDataGetBytePtrSym) - cfRelease := common.GetFunc[common.CFReleaseFunc](coreFoundation, common.CFReleaseSym) - - matching := ioServiceMatching("AppleARMIODevice") + matching := iokit.IOServiceMatching("AppleARMIODevice") var iterator uint32 - if status := ioServiceGetMatchingServices(common.KIOMainPortDefault, uintptr(matching), &iterator); status != common.KERN_SUCCESS { + if status := iokit.IOServiceGetMatchingServices(common.KIOMainPortDefault, uintptr(matching), &iterator); status != common.KERN_SUCCESS { return 0.0, fmt.Errorf("IOServiceGetMatchingServices error=%d", status) } - defer ioObjectRelease(iterator) + defer iokit.IOObjectRelease(iterator) - pCorekey := cfStringCreateWithCString(common.KCFAllocatorDefault, "voltage-states5-sram", common.KCFStringEncodingUTF8) - defer cfRelease(uintptr(pCorekey)) + pCorekey := corefoundation.CFStringCreateWithCString(common.KCFAllocatorDefault, "voltage-states5-sram", common.KCFStringEncodingUTF8) + defer corefoundation.CFRelease(uintptr(pCorekey)) var pCoreHz uint32 for { - service := ioIteratorNext(iterator) + service := iokit.IOIteratorNext(iterator) if service <= 0 { break } buf := common.NewCStr(512) - ioRegistryEntryGetName(service, buf) + iokit.IORegistryEntryGetName(service, buf) if buf.GoString() == "pmgr" { - pCoreRef := ioRegistryEntryCreateCFProperty(service, uintptr(pCorekey), common.KCFAllocatorDefault, common.KNilOptions) - length := cfDataGetLength(uintptr(pCoreRef)) - data := cfDataGetBytePtr(uintptr(pCoreRef)) + pCoreRef := iokit.IORegistryEntryCreateCFProperty(service, uintptr(pCorekey), common.KCFAllocatorDefault, common.KNilOptions) + length := corefoundation.CFDataGetLength(uintptr(pCoreRef)) + data := corefoundation.CFDataGetBytePtr(uintptr(pCoreRef)) // composite uint32 from the byte array buf := unsafe.Slice((*byte)(data), length) @@ -69,11 +57,12 @@ func getFrequency() (float64, error) { // combine the bytes into a uint32 value b := buf[length-8 : length-4] pCoreHz = binary.LittleEndian.Uint32(b) - ioObjectRelease(service) + corefoundation.CFRelease(uintptr(pCoreRef)) + iokit.IOObjectRelease(service) 
 			break
 		}
 
-		ioObjectRelease(service)
+		iokit.IOObjectRelease(service)
 	}
 
 	return float64(pCoreHz / 1_000_000), nil
diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go
index 0897dfa3e78..4072f19cf2f 100644
--- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go
+++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go
@@ -195,7 +195,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
 
 	c := InfoStat{CPU: -1, Cores: 1}
 	for _, line := range lines {
-		fields := strings.Split(line, ":")
+		fields := strings.SplitN(line, ":", 2)
 		if len(fields) < 2 {
 			continue
 		}
@@ -221,6 +221,25 @@
 			if strings.Contains(value, "S390") {
 				processorName = "S390"
 			}
+		case "mvendorid":
+			if !strings.HasPrefix(value, "0x") {
+				continue
+			}
+
+			if v, err := strconv.ParseUint(value[2:], 16, 32); err == nil {
+				switch v {
+				case 0x31e:
+					c.VendorID = "Andes"
+				case 0x029:
+					c.VendorID = "Microchip"
+				case 0x127:
+					c.VendorID = "MIPS"
+				case 0x489:
+					c.VendorID = "SiFive"
+				case 0x5b7:
+					c.VendorID = "T-Head"
+				}
+			}
 		case "CPU implementer":
 			if v, err := strconv.ParseUint(value, 0, 8); err == nil {
 				switch v {
@@ -256,9 +275,9 @@
 					c.VendorID = "Ampere"
 				}
 			}
-		case "cpu family":
+		case "cpu family", "marchid":
 			c.Family = value
-		case "model", "CPU part":
+		case "model", "CPU part", "mimpid":
 			c.Model = value
 			// if CPU is arm based, model name is found via model number. refer to: arch/arm64/kernel/cpuinfo.c
 			if c.VendorID == "ARM" {
@@ -271,7 +290,7 @@
 				}
 			}
 		}
-		case "Model Name", "model name", "cpu":
+		case "Model Name", "model name", "cpu", "uarch":
 			c.ModelName = value
 			if strings.Contains(value, "POWER") {
 				c.Model = strings.Split(value, " ")[0]
@@ -305,7 +324,7 @@
 				return ret, err
 			}
 			c.CacheSize = int32(t)
-		case "physical id":
+		case "physical id", "hart":
 			c.PhysicalID = value
 		case "core id":
 			c.CoreID = value
@@ -313,6 +332,11 @@
 			c.Flags = strings.FieldsFunc(value, func(r rune) bool {
 				return r == ',' || r == ' '
 			})
+		case "isa", "hart isa":
+			if len(c.Flags) != 0 || !strings.HasPrefix(value, "rv64") {
+				continue
+			}
+			c.Flags = riscvISAParse(value)
 		case "microcode":
 			c.Microcode = value
 		}
@@ -476,7 +500,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) {
 			currentInfo = make(map[string]int)
 			continue
 		}
-		fields := strings.Split(line, ":")
+		fields := strings.SplitN(line, ":", 2)
 		if len(fields) < 2 {
 			continue
 		}
@@ -495,3 +519,13 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) {
 	}
 	return ret, nil
 }
+
+func riscvISAParse(s string) []string {
+	ext := strings.Split(s, "_")
+	if len(ext[0]) <= 4 {
+		return nil
+	}
+	// The base ISA string must carry the "rv64" prefix; the characters after
+	// it are single-letter base extensions, and any multi-letter extensions
+	// follow, separated by underscores.
+	base := strings.Split(ext[0][4:], "")
+	return append(base, ext[1:]...)
+} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go index 18315a018fa..a6000a3c50e 100644 --- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go @@ -8,12 +8,14 @@ import ( "errors" "fmt" "math/bits" + "path/filepath" "strconv" + "strings" "syscall" "unsafe" - "github.com/yusufpapurcu/wmi" "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" "github.com/shirou/gopsutil/v4/internal/common" ) @@ -75,6 +77,8 @@ const ( smbiosEndOfTable = 127 // Minimum length for processor structure smbiosTypeProcessor = 4 // SMBIOS Type 4: Processor Information smbiosProcessorMinLength = 0x18 // Minimum length for processor structure + + centralProcessorRegistryKey = `HARDWARE\DESCRIPTION\System\CentralProcessor` ) type relationship uint32 @@ -179,61 +183,27 @@ func getProcessorPowerInformation(ctx context.Context) ([]processorPowerInformat func InfoWithContext(ctx context.Context) ([]InfoStat, error) { var ret []InfoStat - var dst []win32_Processor - q := wmi.CreateQuery(&dst, "") - if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil { - return ret, err - } - - var procID string - for i, l := range dst { - procID = "" - if l.ProcessorID != nil { - procID = *l.ProcessorID - } - - cpu := InfoStat{ - CPU: int32(i), - Family: strconv.FormatUint(uint64(l.Family), 10), - VendorID: l.Manufacturer, - ModelName: l.Name, - Cores: int32(l.NumberOfLogicalProcessors), // TO BE REMOVED, set by getSystemLogicalProcessorInformationEx - PhysicalID: procID, - Mhz: float64(l.MaxClockSpeed), - Flags: []string{}, - } - ret = append(ret, cpu) - } - processorPackages, err := getSystemLogicalProcessorInformationEx(relationProcessorPackage) if err != nil { - // return an error whem wmi will be removed - // return ret, fmt.Errorf("failed to get processor package information: %w", err) - return ret, nil - } - - if len(processorPackages) != len(ret) { - // this should never happen, but it's kept for safety until wmi is removed - return ret, nil + return ret, fmt.Errorf("failed to get processor package information: %w", err) } ppis, powerInformationErr := getProcessorPowerInformation(ctx) if powerInformationErr != nil { - // return an error whem wmi will be removed - // return ret, fmt.Errorf("failed to get processor power information: %w", err) - return ret, nil + return ret, fmt.Errorf("failed to get processor power information: %w", powerInformationErr) } family, processorId, smBIOSErr := getSMBIOSProcessorInfo() if smBIOSErr != nil { - // return an error whem wmi will be removed - // return ret, smBIOSErr - return ret, nil + return ret, smBIOSErr } for i, pkg := range processorPackages { logicalCount := 0 maxMhz := 0 + model := "" + vendorId := "" + // iterate over each set bit in the package affinity mask for _, ga := range pkg.processor.groupMask { g := int(ga.group) forEachSetBit64(uint64(ga.mask), func(bit int) { @@ -246,12 +216,26 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { maxMhz = m } } + + registryKeyPath := filepath.Join(centralProcessorRegistryKey, strconv.Itoa(globalLpl)) + key, err := registry.OpenKey(registry.LOCAL_MACHINE, registryKeyPath, registry.QUERY_VALUE|registry.READ) + if err == nil { + model = getRegistryStringValueIfUnset(key, "ProcessorNameString", model) + vendorId = getRegistryStringValueIfUnset(key, "VendorIdentifier", vendorId) + _ = key.Close() + } }) } - ret[i].Mhz = float64(maxMhz) - ret[i].Cores = 
int32(logicalCount) - ret[i].Family = strconv.FormatUint(uint64(family), 10) - ret[i].PhysicalID = processorId + ret = append(ret, InfoStat{ + CPU: int32(i), + Family: strconv.FormatUint(uint64(family), 10), + VendorID: vendorId, + ModelName: model, + Cores: int32(logicalCount), + PhysicalID: processorId, + Mhz: float64(maxMhz), + Flags: []string{}, + }) } return ret, nil @@ -461,6 +445,17 @@ func getPhysicalCoreCount() (int, error) { return len(infos), err } +func getRegistryStringValueIfUnset(key registry.Key, keyName, value string) string { + if value != "" { + return value + } + val, _, err := key.GetStringValue(keyName) + if err == nil { + return strings.TrimSpace(val) + } + return "" +} + func CountsWithContext(_ context.Context, logical bool) (int, error) { if logical { // Get logical processor count https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go index 851b5dc730e..e400ae63e50 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go @@ -7,6 +7,7 @@ package common // - linux (amd64, arm) // - freebsd (amd64) // - windows (amd64) +// - aix (ppc64) import ( "bufio" @@ -442,7 +443,7 @@ func HostRootWithContext(ctx context.Context, combineWith ...string) string { } // getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running -// sysctl commands (see DoSysctrl). +// sysctl commands. func getSysctrlEnv(env []string) []string { foundLC := false for i, line := range env { @@ -464,3 +465,7 @@ func Round(val float64, n int) float64 { // Multiply the value by pow10, round it, then divide it by pow10 return math.Round(val*pow10) / pow10 } + +func timeSince(ts uint64) uint64 { + return uint64(time.Now().Unix()) - ts +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_aix.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_aix.go new file mode 100644 index 00000000000..313a062c282 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_aix.go @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix + +package common + +import ( + "context" + "errors" + "strconv" + "strings" +) + +func BootTimeWithContext(ctx context.Context, invoke Invoker) (btime uint64, err error) { + ut, err := UptimeWithContext(ctx, invoke) + if err != nil { + return 0, err + } + + if ut <= 0 { + return 0, errors.New("uptime was not set, so cannot calculate boot time from it") + } + + return timeSince(ut), nil +} + +// Uses ps to get the elapsed time for PID 1 in DAYS-HOURS:MINUTES:SECONDS format. +// Examples of ps -o etimes -p 1 output: +// 124-01:40:39 (with days) +// 15:03:02 (without days, hours only) +// 01:02 (just-rebooted systems, minutes and seconds) +func UptimeWithContext(ctx context.Context, invoke Invoker) (uint64, error) { + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "etimes", "-p", "1") + if err != nil { + return 0, err + } + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + if len(lines) < 2 { + return 0, errors.New("ps output has fewer than 2 rows") + } + + // Extract the etimes value from the second row, trimming whitespace + etimes := strings.TrimSpace(lines[1]) + return ParseUptime(etimes), nil +} + +// Parses etimes output from ps command into total seconds. 
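+// For example, "124-01:40:39" yields 124*86400 + 1*3600 + 40*60 + 39 =
+// 10719639 seconds.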
+// Handles formats like: +// - "124-01:40:39" (DAYS-HOURS:MINUTES:SECONDS) +// - "15:03:02" (HOURS:MINUTES:SECONDS) +// - "01:02" (MINUTES:SECONDS, from just-rebooted systems) +func ParseUptime(etimes string) uint64 { + var days, hours, mins, secs uint64 + + // Check if days component is present (contains a dash) + if strings.Contains(etimes, "-") { + parts := strings.Split(etimes, "-") + if len(parts) != 2 { + return 0 + } + + var err error + days, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return 0 + } + + // Parse the HH:MM:SS portion (after days, must have 3 parts) + etimes = parts[1] + timeParts := strings.Split(etimes, ":") + if len(timeParts) != 3 { + return 0 + } + + var err2 error + hours, err2 = strconv.ParseUint(timeParts[0], 10, 64) + if err2 != nil { + return 0 + } + + mins, err2 = strconv.ParseUint(timeParts[1], 10, 64) + if err2 != nil { + return 0 + } + + secs, err2 = strconv.ParseUint(timeParts[2], 10, 64) + if err2 != nil { + return 0 + } + } else { + // Parse time portions (either HH:MM:SS or MM:SS) when no days present + timeParts := strings.Split(etimes, ":") + switch len(timeParts) { + case 3: + // HH:MM:SS format + var err error + hours, err = strconv.ParseUint(timeParts[0], 10, 64) + if err != nil { + return 0 + } + + mins, err = strconv.ParseUint(timeParts[1], 10, 64) + if err != nil { + return 0 + } + + secs, err = strconv.ParseUint(timeParts[2], 10, 64) + if err != nil { + return 0 + } + case 2: + // MM:SS format (just-rebooted systems) + var err error + mins, err = strconv.ParseUint(timeParts[0], 10, 64) + if err != nil { + return 0 + } + + secs, err = strconv.ParseUint(timeParts[1], 10, 64) + if err != nil { + return 0 + } + default: + return 0 + } + } + + // Convert to total seconds + totalSeconds := (days * 24 * 60 * 60) + (hours * 60 * 60) + (mins * 60) + secs + return totalSeconds +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go index c9d610540e9..384b4c5a72b 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go @@ -4,113 +4,320 @@ package common import ( - "context" "errors" "fmt" - "os" - "os/exec" - "strings" + "math" "unsafe" "github.com/ebitengine/purego" - "golang.org/x/sys/unix" ) -func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { - cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() +// Library represents a dynamic library loaded by purego. 
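+// Functions resolved through getFunc are cached in fnMap by symbol name,
+// so each symbol is registered with purego only once per library handle.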
+type library struct { + handle uintptr + fnMap map[string]any +} + +// library paths +const ( + IOKitLibPath = "/System/Library/Frameworks/IOKit.framework/IOKit" + CoreFoundationLibPath = "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation" + SystemLibPath = "/usr/lib/libSystem.B.dylib" +) + +func newLibrary(path string) (*library, error) { + lib, err := purego.Dlopen(path, purego.RTLD_LAZY|purego.RTLD_GLOBAL) if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - -func CallSyscall(mib []int32) ([]byte, uint64, error) { - miblen := uint64(len(mib)) - - // get required buffer size - length := uint64(0) - _, _, err := unix.Syscall6( - 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 - uintptr(unsafe.Pointer(&mib[0])), - uintptr(miblen), - 0, - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if err != 0 { - var b []byte - return b, length, err + return nil, err } - if length == 0 { - var b []byte - return b, length, err + + return &library{ + handle: lib, + fnMap: make(map[string]any), + }, nil +} + +func (lib *library) Dlsym(symbol string) (uintptr, error) { + return purego.Dlsym(lib.handle, symbol) +} + +func getFunc[T any](lib *library, symbol string) T { + var dlfun *dlFunc[T] + if f, ok := lib.fnMap[symbol].(*dlFunc[T]); ok { + dlfun = f + } else { + dlfun = newDlfunc[T](symbol) + dlfun.init(lib.handle) + lib.fnMap[symbol] = dlfun } - // get proc info itself - buf := make([]byte, length) - _, _, err = unix.Syscall6( - 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 - uintptr(unsafe.Pointer(&mib[0])), - uintptr(miblen), - uintptr(unsafe.Pointer(&buf[0])), - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if err != 0 { - return buf, length, err + return dlfun.fn +} + +func (lib *library) Close() { + purego.Dlclose(lib.handle) +} + +type dlFunc[T any] struct { + sym string + fn T +} + +func (d *dlFunc[T]) init(handle uintptr) { + purego.RegisterLibFunc(&d.fn, handle, d.sym) +} + +func newDlfunc[T any](sym string) *dlFunc[T] { + return &dlFunc[T]{sym: sym} +} + +type CoreFoundationLib struct { + *library +} + +func NewCoreFoundationLib() (*CoreFoundationLib, error) { + library, err := newLibrary(CoreFoundationLibPath) + if err != nil { + return nil, err } + return &CoreFoundationLib{library}, nil +} - return buf, length, nil +func (c *CoreFoundationLib) CFGetTypeID(cf uintptr) int64 { + fn := getFunc[CFGetTypeIDFunc](c.library, "CFGetTypeID") + return fn(cf) } -// Library represents a dynamic library loaded by purego. 
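+
+// Illustrative usage of the typed wrappers (a sketch; someCFRef stands in
+// for a caller-supplied CFTypeRef):
+//
+//	cf, err := NewCoreFoundationLib()
+//	if err != nil {
+//		return err
+//	}
+//	defer cf.Close()
+//	typeID := cf.CFGetTypeID(someCFRef)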
-type Library struct { - addr uintptr - path string - close func() +func (c *CoreFoundationLib) CFNumberCreate(allocator uintptr, theType int64, valuePtr uintptr) unsafe.Pointer { + fn := getFunc[CFNumberCreateFunc](c.library, "CFNumberCreate") + return fn(allocator, theType, valuePtr) } -// library paths -const ( - IOKit = "/System/Library/Frameworks/IOKit.framework/IOKit" - CoreFoundation = "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation" - System = "/usr/lib/libSystem.B.dylib" -) +func (c *CoreFoundationLib) CFNumberGetValue(num uintptr, theType int64, valuePtr uintptr) bool { + fn := getFunc[CFNumberGetValueFunc](c.library, "CFNumberGetValue") + return fn(num, theType, valuePtr) +} -func NewLibrary(path string) (*Library, error) { - lib, err := purego.Dlopen(path, purego.RTLD_LAZY|purego.RTLD_GLOBAL) +func (c *CoreFoundationLib) CFDictionaryCreate(allocator uintptr, keys, values *unsafe.Pointer, numValues int64, + keyCallBacks, valueCallBacks uintptr, +) unsafe.Pointer { + fn := getFunc[CFDictionaryCreateFunc](c.library, "CFDictionaryCreate") + return fn(allocator, keys, values, numValues, keyCallBacks, valueCallBacks) +} + +func (c *CoreFoundationLib) CFDictionaryAddValue(theDict, key, value uintptr) { + fn := getFunc[CFDictionaryAddValueFunc](c.library, "CFDictionaryAddValue") + fn(theDict, key, value) +} + +func (c *CoreFoundationLib) CFDictionaryGetValue(theDict, key uintptr) unsafe.Pointer { + fn := getFunc[CFDictionaryGetValueFunc](c.library, "CFDictionaryGetValue") + return fn(theDict, key) +} + +func (c *CoreFoundationLib) CFArrayGetCount(theArray uintptr) int64 { + fn := getFunc[CFArrayGetCountFunc](c.library, "CFArrayGetCount") + return fn(theArray) +} + +func (c *CoreFoundationLib) CFArrayGetValueAtIndex(theArray uintptr, index int64) unsafe.Pointer { + fn := getFunc[CFArrayGetValueAtIndexFunc](c.library, "CFArrayGetValueAtIndex") + return fn(theArray, index) +} + +func (c *CoreFoundationLib) CFStringCreateMutable(alloc uintptr, maxLength int64) unsafe.Pointer { + fn := getFunc[CFStringCreateMutableFunc](c.library, "CFStringCreateMutable") + return fn(alloc, maxLength) +} + +func (c *CoreFoundationLib) CFStringGetLength(theString uintptr) int64 { + fn := getFunc[CFStringGetLengthFunc](c.library, "CFStringGetLength") + return fn(theString) +} + +func (c *CoreFoundationLib) CFStringGetCString(theString uintptr, buffer CStr, bufferSize int64, encoding uint32) { + fn := getFunc[CFStringGetCStringFunc](c.library, "CFStringGetCString") + fn(theString, buffer, bufferSize, encoding) +} + +func (c *CoreFoundationLib) CFStringCreateWithCString(alloc uintptr, cStr string, encoding uint32) unsafe.Pointer { + fn := getFunc[CFStringCreateWithCStringFunc](c.library, "CFStringCreateWithCString") + return fn(alloc, cStr, encoding) +} + +func (c *CoreFoundationLib) CFDataGetLength(theData uintptr) int64 { + fn := getFunc[CFDataGetLengthFunc](c.library, "CFDataGetLength") + return fn(theData) +} + +func (c *CoreFoundationLib) CFDataGetBytePtr(theData uintptr) unsafe.Pointer { + fn := getFunc[CFDataGetBytePtrFunc](c.library, "CFDataGetBytePtr") + return fn(theData) +} + +func (c *CoreFoundationLib) CFRelease(cf uintptr) { + fn := getFunc[CFReleaseFunc](c.library, "CFRelease") + fn(cf) +} + +type IOKitLib struct { + *library +} + +func NewIOKitLib() (*IOKitLib, error) { + library, err := newLibrary(IOKitLibPath) if err != nil { return nil, err } + return &IOKitLib{library}, nil +} + +func (l *IOKitLib) IOServiceGetMatchingService(mainPort uint32, matching uintptr) uint32 { + 
fn := getFunc[IOServiceGetMatchingServiceFunc](l.library, "IOServiceGetMatchingService") + return fn(mainPort, matching) +} - closeFunc := func() { - purego.Dlclose(lib) +func (l *IOKitLib) IOServiceGetMatchingServices(mainPort uint32, matching uintptr, existing *uint32) int32 { + fn := getFunc[IOServiceGetMatchingServicesFunc](l.library, "IOServiceGetMatchingServices") + return fn(mainPort, matching, existing) +} + +func (l *IOKitLib) IOServiceMatching(name string) unsafe.Pointer { + fn := getFunc[IOServiceMatchingFunc](l.library, "IOServiceMatching") + return fn(name) +} + +func (l *IOKitLib) IOServiceOpen(service, owningTask, connType uint32, connect *uint32) int32 { + fn := getFunc[IOServiceOpenFunc](l.library, "IOServiceOpen") + return fn(service, owningTask, connType, connect) +} + +func (l *IOKitLib) IOServiceClose(connect uint32) int32 { + fn := getFunc[IOServiceCloseFunc](l.library, "IOServiceClose") + return fn(connect) +} + +func (l *IOKitLib) IOIteratorNext(iterator uint32) uint32 { + fn := getFunc[IOIteratorNextFunc](l.library, "IOIteratorNext") + return fn(iterator) +} + +func (l *IOKitLib) IORegistryEntryGetName(entry uint32, name CStr) int32 { + fn := getFunc[IORegistryEntryGetNameFunc](l.library, "IORegistryEntryGetName") + return fn(entry, name) +} + +func (l *IOKitLib) IORegistryEntryGetParentEntry(entry uint32, plane string, parent *uint32) int32 { + fn := getFunc[IORegistryEntryGetParentEntryFunc](l.library, "IORegistryEntryGetParentEntry") + return fn(entry, plane, parent) +} + +func (l *IOKitLib) IORegistryEntryCreateCFProperty(entry uint32, key, allocator uintptr, options uint32) unsafe.Pointer { + fn := getFunc[IORegistryEntryCreateCFPropertyFunc](l.library, "IORegistryEntryCreateCFProperty") + return fn(entry, key, allocator, options) +} + +func (l *IOKitLib) IORegistryEntryCreateCFProperties(entry uint32, properties unsafe.Pointer, allocator uintptr, options uint32) int32 { + fn := getFunc[IORegistryEntryCreateCFPropertiesFunc](l.library, "IORegistryEntryCreateCFProperties") + return fn(entry, properties, allocator, options) +} + +func (l *IOKitLib) IOObjectConformsTo(object uint32, className string) bool { + fn := getFunc[IOObjectConformsToFunc](l.library, "IOObjectConformsTo") + return fn(object, className) +} + +func (l *IOKitLib) IOObjectRelease(object uint32) int32 { + fn := getFunc[IOObjectReleaseFunc](l.library, "IOObjectRelease") + return fn(object) +} + +func (l *IOKitLib) IOConnectCallStructMethod(connection, selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int32 { + fn := getFunc[IOConnectCallStructMethodFunc](l.library, "IOConnectCallStructMethod") + return fn(connection, selector, inputStruct, inputStructCnt, outputStruct, outputStructCnt) +} + +func (l *IOKitLib) IOHIDEventSystemClientCreate(allocator uintptr) unsafe.Pointer { + fn := getFunc[IOHIDEventSystemClientCreateFunc](l.library, "IOHIDEventSystemClientCreate") + return fn(allocator) +} + +func (l *IOKitLib) IOHIDEventSystemClientSetMatching(client, match uintptr) int32 { + fn := getFunc[IOHIDEventSystemClientSetMatchingFunc](l.library, "IOHIDEventSystemClientSetMatching") + return fn(client, match) +} + +func (l *IOKitLib) IOHIDServiceClientCopyEvent(service uintptr, eventType int64, options int32, timeout int64) unsafe.Pointer { + fn := getFunc[IOHIDServiceClientCopyEventFunc](l.library, "IOHIDServiceClientCopyEvent") + return fn(service, eventType, options, timeout) +} + +func (l *IOKitLib) IOHIDServiceClientCopyProperty(service, property 
uintptr) unsafe.Pointer { + fn := getFunc[IOHIDServiceClientCopyPropertyFunc](l.library, "IOHIDServiceClientCopyProperty") + return fn(service, property) +} + +func (l *IOKitLib) IOHIDEventGetFloatValue(event uintptr, field int32) float64 { + fn := getFunc[IOHIDEventGetFloatValueFunc](l.library, "IOHIDEventGetFloatValue") + return fn(event, field) +} + +func (l *IOKitLib) IOHIDEventSystemClientCopyServices(client uintptr) unsafe.Pointer { + fn := getFunc[IOHIDEventSystemClientCopyServicesFunc](l.library, "IOHIDEventSystemClientCopyServices") + return fn(client) +} + +type SystemLib struct { + *library +} + +func NewSystemLib() (*SystemLib, error) { + library, err := newLibrary(SystemLibPath) + if err != nil { + return nil, err } + return &SystemLib{library}, nil +} - return &Library{ - addr: lib, - path: path, - close: closeFunc, - }, nil +func (s *SystemLib) HostProcessorInfo(host uint32, flavor int32, outProcessorCount *uint32, outProcessorInfo uintptr, + outProcessorInfoCnt *uint32, +) int32 { + fn := getFunc[HostProcessorInfoFunc](s.library, "host_processor_info") + return fn(host, flavor, outProcessorCount, outProcessorInfo, outProcessorInfoCnt) +} + +func (s *SystemLib) HostStatistics(host uint32, flavor int32, hostInfoOut uintptr, hostInfoOutCnt *uint32) int32 { + fn := getFunc[HostStatisticsFunc](s.library, "host_statistics") + return fn(host, flavor, hostInfoOut, hostInfoOutCnt) +} + +func (s *SystemLib) MachHostSelf() uint32 { + fn := getFunc[MachHostSelfFunc](s.library, "mach_host_self") + return fn() +} + +func (s *SystemLib) MachTaskSelf() uint32 { + fn := getFunc[MachTaskSelfFunc](s.library, "mach_task_self") + return fn() +} + +func (s *SystemLib) MachTimeBaseInfo(info uintptr) int32 { + fn := getFunc[MachTimeBaseInfoFunc](s.library, "mach_timebase_info") + return fn(info) } -func (lib *Library) Dlsym(symbol string) (uintptr, error) { - return purego.Dlsym(lib.addr, symbol) +func (s *SystemLib) VMDeallocate(targetTask uint32, vmAddress, vmSize uintptr) int32 { + fn := getFunc[VMDeallocateFunc](s.library, "vm_deallocate") + return fn(targetTask, vmAddress, vmSize) } -func GetFunc[T any](lib *Library, symbol string) T { - var fptr T - purego.RegisterLibFunc(&fptr, lib.addr, symbol) - return fptr +func (s *SystemLib) ProcPidPath(pid int32, buffer uintptr, bufferSize uint32) int32 { + fn := getFunc[ProcPidPathFunc](s.library, "proc_pidpath") + return fn(pid, buffer, bufferSize) } -func (lib *Library) Close() { - lib.close() +func (s *SystemLib) ProcPidInfo(pid, flavor int32, arg uint64, buffer uintptr, bufferSize int32) int32 { + fn := getFunc[ProcPidInfoFunc](s.library, "proc_pidinfo") + return fn(pid, flavor, arg, buffer, bufferSize) } // status codes @@ -118,24 +325,24 @@ const ( KERN_SUCCESS = 0 ) -// IOKit functions and symbols. +// IOKit types and constants. 
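+// The function types below mirror the C prototypes; kernel status results
+// are declared as int32 to match the 32-bit return codes used by IOKit.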
type ( IOServiceGetMatchingServiceFunc func(mainPort uint32, matching uintptr) uint32 - IOServiceGetMatchingServicesFunc func(mainPort uint32, matching uintptr, existing *uint32) int + IOServiceGetMatchingServicesFunc func(mainPort uint32, matching uintptr, existing *uint32) int32 IOServiceMatchingFunc func(name string) unsafe.Pointer - IOServiceOpenFunc func(service, owningTask, connType uint32, connect *uint32) int - IOServiceCloseFunc func(connect uint32) int + IOServiceOpenFunc func(service, owningTask, connType uint32, connect *uint32) int32 + IOServiceCloseFunc func(connect uint32) int32 IOIteratorNextFunc func(iterator uint32) uint32 - IORegistryEntryGetNameFunc func(entry uint32, name CStr) int - IORegistryEntryGetParentEntryFunc func(entry uint32, plane string, parent *uint32) int + IORegistryEntryGetNameFunc func(entry uint32, name CStr) int32 + IORegistryEntryGetParentEntryFunc func(entry uint32, plane string, parent *uint32) int32 IORegistryEntryCreateCFPropertyFunc func(entry uint32, key, allocator uintptr, options uint32) unsafe.Pointer - IORegistryEntryCreateCFPropertiesFunc func(entry uint32, properties unsafe.Pointer, allocator uintptr, options uint32) int + IORegistryEntryCreateCFPropertiesFunc func(entry uint32, properties unsafe.Pointer, allocator uintptr, options uint32) int32 IOObjectConformsToFunc func(object uint32, className string) bool - IOObjectReleaseFunc func(object uint32) int - IOConnectCallStructMethodFunc func(connection, selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int + IOObjectReleaseFunc func(object uint32) int32 + IOConnectCallStructMethodFunc func(connection, selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int32 IOHIDEventSystemClientCreateFunc func(allocator uintptr) unsafe.Pointer - IOHIDEventSystemClientSetMatchingFunc func(client, match uintptr) int + IOHIDEventSystemClientSetMatchingFunc func(client, match uintptr) int32 IOHIDServiceClientCopyEventFunc func(service uintptr, eventType int64, options int32, timeout int64) unsafe.Pointer IOHIDServiceClientCopyPropertyFunc func(service, property uintptr) unsafe.Pointer @@ -143,29 +350,6 @@ type ( IOHIDEventSystemClientCopyServicesFunc func(client uintptr) unsafe.Pointer ) -const ( - IOServiceGetMatchingServiceSym = "IOServiceGetMatchingService" - IOServiceGetMatchingServicesSym = "IOServiceGetMatchingServices" - IOServiceMatchingSym = "IOServiceMatching" - IOServiceOpenSym = "IOServiceOpen" - IOServiceCloseSym = "IOServiceClose" - IOIteratorNextSym = "IOIteratorNext" - IORegistryEntryGetNameSym = "IORegistryEntryGetName" - IORegistryEntryGetParentEntrySym = "IORegistryEntryGetParentEntry" - IORegistryEntryCreateCFPropertySym = "IORegistryEntryCreateCFProperty" - IORegistryEntryCreateCFPropertiesSym = "IORegistryEntryCreateCFProperties" - IOObjectConformsToSym = "IOObjectConformsTo" - IOObjectReleaseSym = "IOObjectRelease" - IOConnectCallStructMethodSym = "IOConnectCallStructMethod" - - IOHIDEventSystemClientCreateSym = "IOHIDEventSystemClientCreate" - IOHIDEventSystemClientSetMatchingSym = "IOHIDEventSystemClientSetMatching" - IOHIDServiceClientCopyEventSym = "IOHIDServiceClientCopyEvent" - IOHIDServiceClientCopyPropertySym = "IOHIDServiceClientCopyProperty" - IOHIDEventGetFloatValueSym = "IOHIDEventGetFloatValue" - IOHIDEventSystemClientCopyServicesSym = "IOHIDEventSystemClientCopyServices" -) - const ( KIOMainPortDefault = 0 @@ -179,52 +363,35 @@ const ( KIOServicePlane = "IOService" ) -// CoreFoundation 
functions and symbols. +// CoreFoundation types and constants. type ( - CFGetTypeIDFunc func(cf uintptr) int32 - CFNumberCreateFunc func(allocator uintptr, theType int32, valuePtr uintptr) unsafe.Pointer - CFNumberGetValueFunc func(num uintptr, theType int32, valuePtr uintptr) bool - CFDictionaryCreateFunc func(allocator uintptr, keys, values *unsafe.Pointer, numValues int32, + CFGetTypeIDFunc func(cf uintptr) int64 + CFNumberCreateFunc func(allocator uintptr, theType int64, valuePtr uintptr) unsafe.Pointer + CFNumberGetValueFunc func(num uintptr, theType int64, valuePtr uintptr) bool + CFDictionaryCreateFunc func(allocator uintptr, keys, values *unsafe.Pointer, numValues int64, keyCallBacks, valueCallBacks uintptr) unsafe.Pointer CFDictionaryAddValueFunc func(theDict, key, value uintptr) CFDictionaryGetValueFunc func(theDict, key uintptr) unsafe.Pointer - CFArrayGetCountFunc func(theArray uintptr) int32 - CFArrayGetValueAtIndexFunc func(theArray uintptr, index int32) unsafe.Pointer - CFStringCreateMutableFunc func(alloc uintptr, maxLength int32) unsafe.Pointer - CFStringGetLengthFunc func(theString uintptr) int32 - CFStringGetCStringFunc func(theString uintptr, buffer CStr, bufferSize int32, encoding uint32) + CFArrayGetCountFunc func(theArray uintptr) int64 + CFArrayGetValueAtIndexFunc func(theArray uintptr, index int64) unsafe.Pointer + CFStringCreateMutableFunc func(alloc uintptr, maxLength int64) unsafe.Pointer + CFStringGetLengthFunc func(theString uintptr) int64 + CFStringGetCStringFunc func(theString uintptr, buffer CStr, bufferSize int64, encoding uint32) CFStringCreateWithCStringFunc func(alloc uintptr, cStr string, encoding uint32) unsafe.Pointer - CFDataGetLengthFunc func(theData uintptr) int32 + CFDataGetLengthFunc func(theData uintptr) int64 CFDataGetBytePtrFunc func(theData uintptr) unsafe.Pointer CFReleaseFunc func(cf uintptr) ) -const ( - CFGetTypeIDSym = "CFGetTypeID" - CFNumberCreateSym = "CFNumberCreate" - CFNumberGetValueSym = "CFNumberGetValue" - CFDictionaryCreateSym = "CFDictionaryCreate" - CFDictionaryAddValueSym = "CFDictionaryAddValue" - CFDictionaryGetValueSym = "CFDictionaryGetValue" - CFArrayGetCountSym = "CFArrayGetCount" - CFArrayGetValueAtIndexSym = "CFArrayGetValueAtIndex" - CFStringCreateMutableSym = "CFStringCreateMutable" - CFStringGetLengthSym = "CFStringGetLength" - CFStringGetCStringSym = "CFStringGetCString" - CFStringCreateWithCStringSym = "CFStringCreateWithCString" - CFDataGetLengthSym = "CFDataGetLength" - CFDataGetBytePtrSym = "CFDataGetBytePtr" - CFReleaseSym = "CFRelease" -) - const ( KCFStringEncodingUTF8 = 0x08000100 KCFNumberSInt64Type = 4 KCFNumberIntType = 9 KCFAllocatorDefault = 0 + KCFNotFound = -1 ) -// Kernel functions and symbols. +// libSystem types and constants. 
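+// MachTimeBaseInfo mirrors mach_timebase_info_data_t: dividing Numer by
+// Denom gives the factor that scales Mach absolute-time units to nanoseconds.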
type MachTimeBaseInfo struct { Numer uint32 Denom uint32 @@ -232,12 +399,12 @@ type MachTimeBaseInfo struct { type ( HostProcessorInfoFunc func(host uint32, flavor int32, outProcessorCount *uint32, outProcessorInfo uintptr, - outProcessorInfoCnt *uint32) int - HostStatisticsFunc func(host uint32, flavor int32, hostInfoOut uintptr, hostInfoOutCnt *uint32) int + outProcessorInfoCnt *uint32) int32 + HostStatisticsFunc func(host uint32, flavor int32, hostInfoOut uintptr, hostInfoOutCnt *uint32) int32 MachHostSelfFunc func() uint32 MachTaskSelfFunc func() uint32 - MachTimeBaseInfoFunc func(info uintptr) int - VMDeallocateFunc func(targetTask uint32, vmAddress, vmSize uintptr) int + MachTimeBaseInfoFunc func(info uintptr) int32 + VMDeallocateFunc func(targetTask uint32, vmAddress, vmSize uintptr) int32 ) const ( @@ -250,17 +417,12 @@ const ( ) const ( - CTL_KERN = 1 - KERN_ARGMAX = 8 - KERN_PROCARGS2 = 49 - HOST_VM_INFO = 2 HOST_CPU_LOAD_INFO = 3 HOST_VM_INFO_COUNT = 0xf ) -// System functions and symbols. type ( ProcPidPathFunc func(pid int32, buffer uintptr, bufferSize uint32) int32 ProcPidInfoFunc func(pid, flavor int32, arg uint64, buffer uintptr, bufferSize int32) int32 @@ -274,6 +436,7 @@ const ( const ( MAXPATHLEN = 1024 + PROC_PIDLISTFDS = 1 PROC_PIDPATHINFO_MAXSIZE = 4 * MAXPATHLEN PROC_PIDTASKINFO = 4 PROC_PIDVNODEPATHINFO = 9 @@ -281,9 +444,8 @@ const ( // SMC represents a SMC instance. type SMC struct { - lib *Library - conn uint32 - callStruct IOConnectCallStructMethodFunc + lib *IOKitLib + conn uint32 } const ioServiceSMC = "AppleSMC" @@ -305,59 +467,50 @@ const ( KSMCKeyNotFound = 132 ) -func NewSMC(ioKit *Library) (*SMC, error) { - if ioKit.path != IOKit { - return nil, errors.New("library is not IOKit") +func NewSMC() (*SMC, error) { + iokit, err := NewIOKitLib() + if err != nil { + return nil, err } - ioServiceGetMatchingService := GetFunc[IOServiceGetMatchingServiceFunc](ioKit, IOServiceGetMatchingServiceSym) - ioServiceMatching := GetFunc[IOServiceMatchingFunc](ioKit, IOServiceMatchingSym) - ioServiceOpen := GetFunc[IOServiceOpenFunc](ioKit, IOServiceOpenSym) - ioObjectRelease := GetFunc[IOObjectReleaseFunc](ioKit, IOObjectReleaseSym) - machTaskSelf := GetFunc[MachTaskSelfFunc](ioKit, MachTaskSelfSym) - - ioConnectCallStructMethod := GetFunc[IOConnectCallStructMethodFunc](ioKit, IOConnectCallStructMethodSym) - - service := ioServiceGetMatchingService(0, uintptr(ioServiceMatching(ioServiceSMC))) + service := iokit.IOServiceGetMatchingService(0, uintptr(iokit.IOServiceMatching(ioServiceSMC))) if service == 0 { return nil, fmt.Errorf("ERROR: %s NOT FOUND", ioServiceSMC) } var conn uint32 - if result := ioServiceOpen(service, machTaskSelf(), 0, &conn); result != 0 { + machTaskSelf := getFunc[MachTaskSelfFunc](iokit.library, "mach_task_self") + if result := iokit.IOServiceOpen(service, machTaskSelf(), 0, &conn); result != 0 { return nil, errors.New("ERROR: IOServiceOpen failed") } - ioObjectRelease(service) + iokit.IOObjectRelease(service) return &SMC{ - lib: ioKit, - conn: conn, - callStruct: ioConnectCallStructMethod, + lib: iokit, + conn: conn, }, nil } -func (s *SMC) CallStruct(selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int { - return s.callStruct(s.conn, selector, inputStruct, inputStructCnt, outputStruct, outputStructCnt) +func (s *SMC) CallStruct(selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int32 { + return s.lib.IOConnectCallStructMethod(s.conn, selector, inputStruct, 
inputStructCnt, outputStruct, outputStructCnt) } func (s *SMC) Close() error { - ioServiceClose := GetFunc[IOServiceCloseFunc](s.lib, IOServiceCloseSym) - - if result := ioServiceClose(s.conn); result != 0 { + if result := s.lib.IOServiceClose(s.conn); result != 0 { return errors.New("ERROR: IOServiceClose failed") } + s.lib.Close() return nil } type CStr []byte -func NewCStr(length int32) CStr { +func NewCStr(length int64) CStr { return make(CStr, length) } -func (s CStr) Length() int32 { - // Include null terminator to make CFStringGetCString properly functions - return int32(len(s)) + 1 +func (s CStr) Length() int64 { + return int64(len(s)) } func (s CStr) Ptr() *byte { @@ -398,3 +551,11 @@ func GoString(cStr *byte) string { } return string(unsafe.Slice(cStr, length)) } + +// https://github.com/apple-oss-distributions/CF/blob/dc54c6bb1c1e5e0b9486c1d26dd5bef110b20bf3/CFString.c#L463 +func GetCFStringBufLengthForUTF8(length int64) int64 { + if length > (math.MaxInt64 / 3) { + return KCFNotFound + } + return length*3 + 1 // includes null terminator +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go index 53cdceeb6d4..7a40a40c2bb 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go @@ -5,9 +5,6 @@ package common import ( "fmt" - "os" - "os/exec" - "strings" "unsafe" "golang.org/x/sys/unix" @@ -28,20 +25,6 @@ func SysctlUint(mib string) (uint64, error) { return 0, fmt.Errorf("unexpected size: %s, %d", mib, len(buf)) } -func DoSysctrl(mib string) ([]string, error) { - cmd := exec.Command("sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - func CallSyscall(mib []int32) ([]byte, uint64, error) { mibptr := unsafe.Pointer(&mib[0]) miblen := uint64(len(mib)) diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go index 1ec223150bb..a2473f41dcf 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go @@ -7,7 +7,6 @@ import ( "context" "errors" "os" - "os/exec" "path/filepath" "strconv" "strings" @@ -20,20 +19,6 @@ import ( // cachedBootTime must be accessed via atomic.Load/StoreUint64 var cachedBootTime uint64 -func DoSysctrl(mib string) ([]string, error) { - cmd := exec.Command("sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - func NumProcs() (uint64, error) { return NumProcsWithContext(context.Background()) } diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go index 206532126c9..52796ddbf50 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go @@ -4,28 +4,11 @@ package common import ( - "os" - "os/exec" - 
"strings" "unsafe" "golang.org/x/sys/unix" ) -func DoSysctrl(mib string) ([]string, error) { - cmd := exec.Command("sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - func CallSyscall(mib []int32) ([]byte, uint64, error) { mibptr := unsafe.Pointer(&mib[0]) miblen := uint64(len(mib)) diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go index 00fa19a2fb4..df44ac04288 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go @@ -4,28 +4,11 @@ package common import ( - "os" - "os/exec" - "strings" "unsafe" "golang.org/x/sys/unix" ) -func DoSysctrl(mib string) ([]string, error) { - cmd := exec.Command("sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - func CallSyscall(mib []int32) ([]byte, uint64, error) { mibptr := unsafe.Pointer(&mib[0]) miblen := uint64(len(mib)) diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go index eb0b63144be..e09768f3cc7 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go @@ -1,7 +1,10 @@ // SPDX-License-Identifier: BSD-3-Clause package common -import "fmt" +import ( + "fmt" + "strings" +) const ( maxWarnings = 100 // An arbitrary limit to avoid excessive memory usage, it has no sense to store hundreds of errors @@ -33,9 +36,11 @@ func (w *Warnings) Reference() error { func (w *Warnings) Error() string { if w.Verbose { str := "" + var sb strings.Builder for i, e := range w.List { - str += fmt.Sprintf("\tError %d: %s\n", i, e.Error()) + sb.WriteString(fmt.Sprintf("\tError %d: %s\n", i, e.Error())) } + str += sb.String() if w.tooManyErrors { str += fmt.Sprintf("\t%s\n", tooManyErrorsMessage) } diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go index 659b65575bc..b69dfb605e1 100644 --- a/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go @@ -14,6 +14,8 @@ type ExVirtualMemory struct { ActiveAnon uint64 `json:"activeanon"` InactiveAnon uint64 `json:"inactiveanon"` Unevictable uint64 `json:"unevictable"` + Percpu uint64 `json:"percpu"` + KernelStack uint64 `json:"kernelstack"` } func (v ExVirtualMemory) String() string { diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go index 01932ddfda1..f4f46f0d2f9 100644 --- a/vendor/github.com/shirou/gopsutil/v4/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go @@ -48,10 +48,11 @@ type VirtualMemoryStat struct { Laundry uint64 `json:"laundry"` // Linux specific numbers - // https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-meminfo + // https://blogs.oracle.com/linux/understanding-linux-kernel-memory-statistics // 
https://www.kernel.org/doc/Documentation/filesystems/proc.txt // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting // https://www.kernel.org/doc/Documentation/vm/transhuge.txt + // Buffers uint64 `json:"buffers"` Cached uint64 `json:"cached"` WriteBack uint64 `json:"writeBack"` diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go index 7d96a3bb098..1b3e9f21beb 100644 --- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go @@ -85,26 +85,23 @@ func VirtualMemory() (*VirtualMemoryStat, error) { } func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { - machLib, err := common.NewLibrary(common.System) + sys, err := common.NewSystemLib() if err != nil { return nil, err } - defer machLib.Close() - - hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) - machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + defer sys.Close() count := uint32(common.HOST_VM_INFO_COUNT) var vmstat vmStatisticsData - status := hostStatistics(machHostSelf(), common.HOST_VM_INFO, + status := sys.HostStatistics(sys.MachHostSelf(), common.HOST_VM_INFO, uintptr(unsafe.Pointer(&vmstat)), &count) if status != common.KERN_SUCCESS { return nil, fmt.Errorf("host_statistics error=%d", status) } - pageSizeAddr, _ := machLib.Dlsym("vm_kernel_page_size") + pageSizeAddr, _ := sys.Dlsym("vm_kernel_page_size") pageSize := **(**uint64)(unsafe.Pointer(&pageSizeAddr)) total, err := getHwMemsize() if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go index 4b53b4a004e..9d969ba8393 100644 --- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go @@ -138,6 +138,12 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *ExVir return ret, retEx, err } retEx.Unevictable = t * 1024 + case "Percpu": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + retEx.Percpu = t * 1024 case "Writeback": t, err := strconv.ParseUint(value, 10, 64) if err != nil { @@ -181,6 +187,12 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *ExVir return ret, retEx, err } ret.Sunreclaim = t * 1024 + case "KernelStack": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + retEx.KernelStack = t * 1024 case "PageTables": t, err := strconv.ParseUint(value, 10, 64) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go index f01b04b5038..d1e7f0ce773 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go @@ -50,26 +50,25 @@ func IOCountersByFileWithContext(_ context.Context, pernic bool, filename string return nil, err } - parts := make([]string, 2) - statlen := len(lines) - 1 ret := make([]IOCountersStat, 0, statlen) for _, line := range lines[2:] { + // Split interface name and stats data at the last ":" separatorPos := strings.LastIndex(line, ":") if separatorPos == -1 { continue } - parts[0] = line[0:separatorPos] - parts[1] = line[separatorPos+1:] + interfacePart := line[0:separatorPos] + statsPart := line[separatorPos+1:] - interfaceName := strings.TrimSpace(parts[0]) + interfaceName := 
strings.TrimSpace(interfacePart)
 		if interfaceName == "" {
 			continue
 		}
 
-		fields := strings.Fields(strings.TrimSpace(parts[1]))
+		fields := strings.Fields(strings.TrimSpace(statsPart))
 		bytesRecv, err := strconv.ParseUint(fields[0], 10, 64)
 		if err != nil {
 			return ret, err
@@ -610,7 +609,7 @@ func getProcInodesAllWithContext(ctx context.Context, root string, maxConn int)
 	return ret, nil
 }
 
-// decodeAddress decode addresse represents addr in proc/net/*
+// decodeAddress decodes an address as represented in proc/net/*
 // ex:
 // "0500000A:0016" -> "10.0.0.5", 22
 // "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go
index 0bd4d9e1a10..5db5ff48192 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
+	"regexp"
 	"runtime"
 	"sort"
 	"sync"
@@ -18,6 +19,7 @@ import (
 var (
 	invoke                 common.Invoker = common.Invoke{}
+	strictIntPtrn                         = regexp.MustCompile(`^\d+$`)
 	ErrorNoChildren        = errors.New("process does not have children") // Deprecated: ErrorNoChildren is never returned by process.Children(), check its returned []*Process slice length instead
 	ErrorProcessNotRunning = errors.New("process does not exist")
 	ErrorNotPermitted      = errors.New("operation not permitted")
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go
index e591e2d157d..d094d389d4d 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go
@@ -36,10 +36,6 @@ func (*Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesSta
 	return nil, common.ErrNotImplementedError
 }
 
-func (*Process) NumFDsWithContext(_ context.Context) (int32, error) {
-	return 0, common.ErrNotImplementedError
-}
-
 func (*Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) {
 	return nil, common.ErrNotImplementedError
 }
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go
index 6fd57f3d847..d7fb921cbf4 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go
@@ -237,7 +237,7 @@ func (p *Process) getKProc() (*unix.KinfoProc, error) {
 
 // call ps command.
 // Return value deletes Header line(you must not input wrong arg).
-// And splited by Space. Caller have responsibility to manage.
+// And split by Space. The caller is responsible for managing the result.
 // If passed arg pid is 0, get information from all process.
func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption, nameOption bool) ([][]string, error) { var cmd []string @@ -280,31 +280,21 @@ func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption, } type dlFuncs struct { - lib *common.Library - - procPidPath common.ProcPidPathFunc - procPidInfo common.ProcPidInfoFunc - machTimeBaseInfo common.MachTimeBaseInfoFunc + lib *common.SystemLib } func loadProcFuncs() (*dlFuncs, error) { - lib, err := common.NewLibrary(common.System) + lib, err := common.NewSystemLib() if err != nil { return nil, err } - - return &dlFuncs{ - lib: lib, - procPidPath: common.GetFunc[common.ProcPidPathFunc](lib, common.ProcPidPathSym), - procPidInfo: common.GetFunc[common.ProcPidInfoFunc](lib, common.ProcPidInfoSym), - machTimeBaseInfo: common.GetFunc[common.MachTimeBaseInfoFunc](lib, common.MachTimeBaseInfoSym), - }, nil + return &dlFuncs{lib}, err } func (f *dlFuncs) getTimeScaleToNanoSeconds() float64 { var timeBaseInfo common.MachTimeBaseInfo - f.machTimeBaseInfo(uintptr(unsafe.Pointer(&timeBaseInfo))) + f.lib.MachTimeBaseInfo(uintptr(unsafe.Pointer(&timeBaseInfo))) return float64(timeBaseInfo.Numer) / float64(timeBaseInfo.Denom) } @@ -321,7 +311,7 @@ func (p *Process) ExeWithContext(_ context.Context) (string, error) { defer funcs.Close() buf := common.NewCStr(common.PROC_PIDPATHINFO_MAXSIZE) - ret := funcs.procPidPath(p.Pid, buf.Addr(), common.PROC_PIDPATHINFO_MAXSIZE) + ret := funcs.lib.ProcPidPath(p.Pid, buf.Addr(), common.PROC_PIDPATHINFO_MAXSIZE) if ret <= 0 { return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) @@ -330,13 +320,6 @@ func (p *Process) ExeWithContext(_ context.Context) (string, error) { return buf.GoString(), nil } -// sys/proc_info.h -type vnodePathInfo struct { - _ [152]byte - vipPath [common.MAXPATHLEN]byte - _ [1176]byte -} - // CwdWithContext retrieves the Current Working Directory for the given process. // It uses the proc_pidinfo from libproc and will only work for processes the // EUID can access. 
Otherwise "operation not permitted" will be returned as the @@ -355,7 +338,7 @@ func (p *Process) CwdWithContext(_ context.Context) (string, error) { var vpi vnodePathInfo const vpiSize = int32(unsafe.Sizeof(vpi)) - ret := funcs.procPidInfo(p.Pid, common.PROC_PIDVNODEPATHINFO, 0, uintptr(unsafe.Pointer(&vpi)), vpiSize) + ret := funcs.lib.ProcPidInfo(p.Pid, common.PROC_PIDVNODEPATHINFO, 0, uintptr(unsafe.Pointer(&vpi)), vpiSize) errno, _ := funcs.lib.Dlsym("errno") err = *(**unix.Errno)(unsafe.Pointer(&errno)) if errors.Is(err, unix.EPERM) { @@ -369,11 +352,11 @@ func (p *Process) CwdWithContext(_ context.Context) (string, error) { if ret != vpiSize { return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) } - return common.GoString(&vpi.vipPath[0]), nil + return common.GoString((*byte)(unsafe.Pointer(&vpi.Cdir.Path[0]))), nil } func procArgs(pid int32) ([]byte, int, error) { - procargs, _, err := common.CallSyscall([]int32{common.CTL_KERN, common.KERN_PROCARGS2, pid}) + procargs, err := unix.SysctlRaw("kern.procargs2", int(pid)) if err != nil { return nil, 0, err } @@ -447,7 +430,7 @@ func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { defer funcs.Close() var ti ProcTaskInfo - funcs.procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + funcs.lib.ProcPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) return int32(ti.Threadnum), nil } @@ -460,7 +443,7 @@ func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { defer funcs.Close() var ti ProcTaskInfo - funcs.procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + funcs.lib.ProcPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) timescaleToNanoSeconds := funcs.getTimeScaleToNanoSeconds() ret := &cpu.TimesStat{ @@ -479,7 +462,7 @@ func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, err defer funcs.Close() var ti ProcTaskInfo - funcs.procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + funcs.lib.ProcPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) ret := &MemoryInfoStat{ RSS: uint64(ti.Resident_size), @@ -488,3 +471,54 @@ func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, err } return ret, nil } + +// procFDInfo represents a file descriptor entry from sys/proc_info.h +type procFDInfo struct { + ProcFd int32 + ProcFdtype uint32 +} + +// NumFDsWithContext returns the number of file descriptors used by the process. +// It uses proc_pidinfo with PROC_PIDLISTFDS to query the kernel for the count +// of open file descriptors. The method makes a single syscall and calculates +// the count from the buffer size returned by the kernel. 
+func (p *Process) NumFDsWithContext(_ context.Context) (int32, error) { + funcs, err := loadProcFuncs() + if err != nil { + return 0, err + } + defer funcs.Close() + + // First call: get required buffer size + bufferSize := funcs.lib.ProcPidInfo( + p.Pid, + common.PROC_PIDLISTFDS, + 0, + 0, // NULL buffer + 0, // 0 size + ) + if bufferSize <= 0 { + return 0, fmt.Errorf("unknown error: proc_pidinfo returned %d", bufferSize) + } + + // Allocate buffer of the required size + const sizeofProcFDInfo = int32(unsafe.Sizeof(procFDInfo{})) + numEntries := bufferSize / sizeofProcFDInfo + buf := make([]procFDInfo, numEntries) + + // Second call: get actual data + ret := funcs.lib.ProcPidInfo( + p.Pid, + common.PROC_PIDLISTFDS, + 0, + uintptr(unsafe.Pointer(&buf[0])), // Real buffer + bufferSize, // Size from first call + ) + if ret <= 0 { + return 0, fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) + } + + // Calculate actual number of FDs returned + numFDs := ret / sizeofProcFDInfo + return numFDs, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go index 890a5d5331a..a40f96195d8 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go @@ -233,6 +233,51 @@ type ProcTaskInfo struct { Priority int32 } +type vinfoStat struct { + Dev uint32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Atime int64 + Atimensec int64 + Mtime int64 + Mtimensec int64 + Ctime int64 + Ctimensec int64 + Birthtime int64 + Birthtimensec int64 + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Rdev uint32 + Qspare [2]int64 +} + +type fsid struct { + Val [2]int32 +} + +type vnodeInfo struct { + Stat vinfoStat + Type int32 + Pad int32 + Fsid fsid +} + +type vnodeInfoPath struct { + Vi vnodeInfo + Path [1024]int8 +} + +type vnodePathInfo struct { + Cdir vnodeInfoPath + Rdir vnodeInfoPath +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go index 8075cf227d1..7ab1afb1170 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go @@ -211,6 +211,51 @@ type ProcTaskInfo struct { Priority int32 } +type vinfoStat struct { + Dev uint32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Atime int64 + Atimensec int64 + Mtime int64 + Mtimensec int64 + Ctime int64 + Ctimensec int64 + Birthtime int64 + Birthtimensec int64 + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Rdev uint32 + Qspare [2]int64 +} + +type fsid struct { + Val [2]int32 +} + +type vnodeInfo struct { + Stat vinfoStat + Type int32 + Pad int32 + Fsid fsid +} + +type vnodeInfoPath struct { + Vi vnodeInfo + Path [1024]int8 +} + +type vnodePathInfo struct { + Cdir vnodeInfoPath + Rdir vnodeInfoPath +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go index ae173ff1de1..283af9bb3fd 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go @@ -344,6 +344,10 @@ func ProcessesWithContext(ctx 
context.Context) ([]*Process, error) { return results, nil } +func (*Process) NumFDsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + func (p *Process) getKProc() (*KinfoProc, error) { mib := []int32{CTLKern, KernProc, KernProcPID, p.Pid} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go index a6279d1228b..764523dcf7d 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go @@ -346,7 +346,7 @@ func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { ret := make([]*Process, 0, len(statFiles)) for _, statFile := range statFiles { statContents, err := os.ReadFile(statFile) - if err != nil { + if err != nil || len(statContents) == 0 { continue } fields := splitProcStat(statContents) @@ -1038,6 +1038,9 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui } contents, err := os.ReadFile(statPath) + if err == nil && len(contents) == 0 { + err = fmt.Errorf("process %d: stat file is empty, process may have terminated", pid) + } if err != nil { return 0, 0, nil, 0, 0, 0, nil, err } @@ -1173,6 +1176,9 @@ func readPidsFromDir(path string) ([]int32, error) { return nil, err } for _, fname := range fnames { + if !strictIntPtrn.MatchString(fname) { + continue + } pid, err := strconv.ParseInt(fname, 10, 32) if err != nil { // if not numeric name, just skip diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go index 11bc5c18d22..31fdb85bc9d 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go @@ -342,6 +342,10 @@ func ProcessesWithContext(ctx context.Context) ([]*Process, error) { return results, nil } +func (*Process) NumFDsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + func (p *Process) getKProc() (*KinfoProc, error) { buf, length, err := callKernProcSyscall(KernProcPID, p.Pid) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go index 9fe55b490ec..9f0e93f31a2 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go @@ -26,7 +26,9 @@ func getTerminalMap() (map[uint64]string, error) { ret := make(map[uint64]string) var termfiles []string - d, err := os.Open("/dev") + devPath := common.HostDev() + + d, err := os.Open(devPath) if err != nil { return nil, err } @@ -37,32 +39,29 @@ func getTerminalMap() (map[uint64]string, error) { return nil, err } for _, devname := range devnames { - if strings.HasPrefix(devname, "/dev/tty") { - termfiles = append(termfiles, "/dev/tty/"+devname) + if strings.HasPrefix(devname, "tty") { + termfiles = append(termfiles, filepath.Join(devPath, devname)) } } var ptsnames []string - ptsd, err := os.Open("/dev/pts") + ptsPath := filepath.Join(devPath, "pts") + ptsd, err := os.Open(ptsPath) if err != nil { - ptsnames, _ = filepath.Glob("/dev/ttyp*") + ptsnames, _ = filepath.Glob(filepath.Join(devPath, "ttyp*")) if ptsnames == nil { return nil, err } - } - defer ptsd.Close() - - if ptsnames == nil { + termfiles = append(termfiles, ptsnames...) 
+ } else { defer ptsd.Close() ptsnames, err = ptsd.Readdirnames(-1) if err != nil { return nil, err } for _, ptsname := range ptsnames { - termfiles = append(termfiles, "/dev/pts/"+ptsname) + termfiles = append(termfiles, filepath.Join(ptsPath, ptsname)) } - } else { - termfiles = ptsnames } for _, name := range termfiles { @@ -72,7 +71,7 @@ func getTerminalMap() (map[uint64]string, error) { return nil, err } rdev := uint64(stat.Rdev) - ret[rdev] = strings.ReplaceAll(name, "/dev", "") + ret[rdev] = strings.TrimPrefix(name, devPath+string(os.PathSeparator)) } return ret, nil } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go index 685a3cc32de..547d2287216 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go @@ -289,6 +289,9 @@ func readPidsFromDir(path string) ([]int32, error) { return nil, err } for _, fname := range fnames { + if !strictIntPtrn.MatchString(fname) { + continue + } pid, err := strconv.ParseInt(fname, 10, 32) if err != nil { // if not numeric name, just skip diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go index 15fa18d82a9..a1e28be6985 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go @@ -699,6 +699,7 @@ func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, er if err != nil { return nil, err } + defer windows.CloseHandle(process) buffer := make([]byte, 1024) var size uint32 @@ -747,9 +748,10 @@ func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, er } var fileName string - ch := make(chan struct{}) + ch := make(chan struct{}, 1) go func() { + defer close(ch) var buf [syscall.MAX_LONG_PATH]uint16 n, err := windows.GetFinalPathNameByHandle(windows.Handle(file), &buf[0], syscall.MAX_LONG_PATH, 0) if err != nil { diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 6acf8ab1ea0..104dc244071 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -57,3 +57,10 @@ linters: - common-false-positives - legacy - std-error-handling + settings: + govet: + # Disable buildtag check to allow dual build tag syntax (both //go:build and // +build). + # This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17. + # This can be removed once Cobra requires Go 1.17 or higher. + disable: + - buildtag diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 78088db69ca..c05fed45aef 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -557,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { } } -var minUsagePadding = 25 +const minUsagePadding = 25 // UsagePadding return padding for the usage. func (c *Command) UsagePadding() int { @@ -567,7 +567,7 @@ func (c *Command) UsagePadding() int { return c.parent.commandsMaxUseLen } -var minCommandPathPadding = 11 +const minCommandPathPadding = 11 // CommandPathPadding return padding for the command path. 
func (c *Command) CommandPathPadding() int { @@ -577,7 +577,7 @@ func (c *Command) CommandPathPadding() int { return c.parent.commandsMaxCommandPathLen } -var minNamePadding = 11 +const minNamePadding = 11 // NamePadding returns padding for the name. func (c *Command) NamePadding() int { @@ -1939,7 +1939,7 @@ type tmplFunc struct { fn func(io.Writer, interface{}) error } -var defaultUsageTemplate = `Usage:{{if .Runnable}} +const defaultUsageTemplate = `Usage:{{if .Runnable}} {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} @@ -2039,7 +2039,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { return nil } -var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} +const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` @@ -2061,7 +2061,7 @@ func defaultHelpFunc(w io.Writer, in interface{}) error { return nil } -var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} ` // defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync. diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml index 495e5e633d8..61724abecbf 100644 --- a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -1,6 +1,6 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.24.0 + GO_VERSION: go1.25.0 freebsd_13_task: freebsd_instance: diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go index 40f6c345fcd..87cf6a10a47 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go @@ -25,10 +25,13 @@ const ( _POSIX2_UPE = -1 ) -var clktck struct { - sync.Once - v int64 -} +var clktck = sync.OnceValue(func() int64 { + ci, err := unix.SysctlClockinfo("kern.clockrate") + if err != nil { + return -1 + } + return int64(ci.Hz) +}) func sysconfPOSIX(name int) (int64, error) { // NetBSD does not define all _POSIX_* values used in sysconf_posix.go @@ -54,14 +57,7 @@ func sysconf(name int) (int64, error) { } return -1, nil case SC_CLK_TCK: - // TODO: use sync.OnceValue once Go 1.21 is the minimal supported version - clktck.Do(func() { - clktck.v = -1 - if ci, err := unix.SysctlClockinfo("kern.clockrate"); err == nil { - clktck.v = int64(ci.Hz) - } - }) - return clktck.v, nil + return clktck(), nil case SC_NGROUPS_MAX: return sysctl32("kern.ngroups"), nil case SC_JOB_CONTROL: diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml index 495e5e633d8..61724abecbf 100644 --- a/vendor/github.com/tklauser/numcpus/.cirrus.yml +++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml @@ -1,6 +1,6 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.24.0 + GO_VERSION: go1.25.0 freebsd_13_task: freebsd_instance: diff --git a/vendor/github.com/tklauser/numcpus/numcpus_linux.go b/vendor/github.com/tklauser/numcpus/numcpus_linux.go index 7b991da468f..d05ee982539 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_linux.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_linux.go @@ -47,10 +47,12 @@ func readCPURangeWith[T any](file string, f func(cpus 
string) (T, error)) (T, er if err != nil { return zero, err } - return f(strings.Trim(string(buf), "\n ")) + return f(string(buf)) } func countCPURange(cpus string) (int, error) { + cpus = strings.Trim(cpus, "\n ") + // Treat empty file as valid. This might be the case if there are no offline CPUs in which // case /sys/devices/system/cpu/offline is empty. if cpus == "" { @@ -58,7 +60,7 @@ func countCPURange(cpus string) (int, error) { } n := int(0) - for _, cpuRange := range strings.Split(cpus, ",") { + for cpuRange := range strings.SplitSeq(cpus, ",") { if cpuRange == "" { return 0, fmt.Errorf("empty CPU range in CPU string %q", cpus) } @@ -84,13 +86,15 @@ func countCPURange(cpus string) (int, error) { } func listCPURange(cpus string) ([]int, error) { + cpus = strings.Trim(cpus, "\n ") + // See comment in countCPURange. if cpus == "" { return []int{}, nil } list := []int{} - for _, cpuRange := range strings.Split(cpus, ",") { + for cpuRange := range strings.SplitSeq(cpus, ",") { if cpuRange == "" { return nil, fmt.Errorf("empty CPU range in CPU string %q", cpus) } diff --git a/vendor/go.opentelemetry.io/collector/component/component.go b/vendor/go.opentelemetry.io/collector/component/component.go index 5a32c5041d7..1da518ac335 100644 --- a/vendor/go.opentelemetry.io/collector/component/component.go +++ b/vendor/go.opentelemetry.io/collector/component/component.go @@ -46,7 +46,7 @@ type Component interface { // If there are any background operations running by the component they must be aborted before // this function returns. Remember that if you started any long-running background operations from // the Start() method, those operations must be also cancelled. If there are any buffers in the - // component, they should be cleared and the data sent immediately to the next component. + // component, they should be flushed with the data being sent immediately to the next component. // // The component's lifecycle is completed once the Shutdown() method returns. No other // methods of the component are called after that. If necessary a new component with diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go index ac68689eb41..4bc85c15ad6 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go @@ -19,7 +19,7 @@ func NewNopHost() component.Host { return &nopHost{} } -// GetExtensions returns a `nil` extensions map. +// GetExtensions returns an empty extensions map. func (nh *nopHost) GetExtensions() map[component.ID]component.Component { - return nil + return map[component.ID]component.Component{} } diff --git a/vendor/go.opentelemetry.io/collector/component/host.go b/vendor/go.opentelemetry.io/collector/component/host.go index dc8210ffbe3..f425b76bb67 100644 --- a/vendor/go.opentelemetry.io/collector/component/host.go +++ b/vendor/go.opentelemetry.io/collector/component/host.go @@ -17,5 +17,7 @@ type Host interface { // // GetExtensions can be called by the component anytime after Component.Start() begins and // until Component.Shutdown() ends. + // + // The returned map should only be nil if the host does not support extensions at all. 
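For the numcpus change above: trimming moves into countCPURange and listCPURange so each parser normalizes its own input, and strings.SplitSeq (new in Go 1.24) replaces strings.Split to iterate the comma-separated ranges without allocating an intermediate slice. A standalone sketch of the counting logic, simplified relative to the vendored code (it omits the empty-range and ordering checks):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// countCPURange counts CPUs in a sysfs-style range string such as "0-3,5,7-8".
func countCPURange(cpus string) (int, error) {
	cpus = strings.Trim(cpus, "\n ")
	if cpus == "" { // e.g. an empty /sys/devices/system/cpu/offline
		return 0, nil
	}
	n := 0
	for cpuRange := range strings.SplitSeq(cpus, ",") {
		if from, to, ok := strings.Cut(cpuRange, "-"); ok {
			lo, err := strconv.Atoi(from)
			if err != nil {
				return 0, err
			}
			hi, err := strconv.Atoi(to)
			if err != nil {
				return 0, err
			}
			n += hi - lo + 1
		} else {
			if _, err := strconv.Atoi(cpuRange); err != nil {
				return 0, err
			}
			n++
		}
	}
	return n, nil
}

func main() {
	fmt.Println(countCPURange("0-3,5,7-8")) // 7 <nil>
}
```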
GetExtensions() map[ID]Component } diff --git a/vendor/go.opentelemetry.io/collector/component/telemetry.go b/vendor/go.opentelemetry.io/collector/component/telemetry.go index 461dead4b32..7d39c18ab1d 100644 --- a/vendor/go.opentelemetry.io/collector/component/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/component/telemetry.go @@ -4,8 +4,31 @@ package component // import "go.opentelemetry.io/collector/component" import ( - "go.opentelemetry.io/collector/internal/telemetry" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/pdata/pcommon" ) // TelemetrySettings provides components with APIs to report telemetry. -type TelemetrySettings = telemetry.TelemetrySettings +type TelemetrySettings struct { + // Logger that the factory can use during creation and can pass to the created + // component to be used later as well. + Logger *zap.Logger + + // TracerProvider that the factory can pass to other instrumented third-party libraries. + // + // The service may wrap this provider for attribute injection. The wrapper may implement an + // additional `Unwrap() trace.TracerProvider` method to grant access to the underlying SDK. + TracerProvider trace.TracerProvider + + // MeterProvider that the factory can pass to other instrumented third-party libraries. + MeterProvider metric.MeterProvider + + // Resource contains the resource attributes for the collector's telemetry. + Resource pcommon.Resource + + // prevent unkeyed literal initialization + _ struct{} +} diff --git a/vendor/go.opentelemetry.io/collector/config/configauth/README.md b/vendor/go.opentelemetry.io/collector/config/configauth/README.md index 25ed0be9c87..b50ebcbaa8d 100644 --- a/vendor/go.opentelemetry.io/collector/config/configauth/README.md +++ b/vendor/go.opentelemetry.io/collector/config/configauth/README.md @@ -59,7 +59,7 @@ receivers: ## oidc is the extension name to use as the authenticator for this receiver authenticator: oidc - otlphttp/withauth: + otlp_http/withauth: endpoint: http://localhost:9000 auth: authenticator: oauth2client diff --git a/vendor/go.opentelemetry.io/collector/config/configauth/config.yaml b/vendor/go.opentelemetry.io/collector/config/configauth/config.yaml new file mode 100644 index 00000000000..a3649bbe6f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/configauth/config.yaml @@ -0,0 +1,9 @@ +$defs: + config: + description: Config defines the auth settings for the receiver. + type: object + properties: + authenticator: + description: AuthenticatorID specifies the name of the extension to use in order to authenticate the incoming data point. 
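The new TelemetrySettings doc above notes that the service may wrap TracerProvider and that the wrapper may expose the underlying SDK via an optional `Unwrap() trace.TracerProvider` method. A hedged sketch of how a component could use that escape hatch (underlyingProvider is an illustrative name, not part of the API):

```go
package example

import (
	"go.opentelemetry.io/otel/trace"

	"go.opentelemetry.io/collector/component"
)

// underlyingProvider returns the SDK provider when the service handed the
// component a wrapper supporting the optional Unwrap method, and the
// provider itself otherwise.
func underlyingProvider(set component.TelemetrySettings) trace.TracerProvider {
	if u, ok := set.TracerProvider.(interface{ Unwrap() trace.TracerProvider }); ok {
		return u.Unwrap()
	}
	return set.TracerProvider
}
```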
+ type: string + x-customType: go.opentelemetry.io/collector/component.ID diff --git a/vendor/go.opentelemetry.io/collector/config/configcompression/config.yaml b/vendor/go.opentelemetry.io/collector/config/configcompression/config.yaml new file mode 100644 index 00000000000..0c089957fd8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/configcompression/config.yaml @@ -0,0 +1,11 @@ +$defs: + compression_params: + type: object + properties: + level: + $ref: level + level: + type: integer + type: + description: Type represents a compression method + type: string diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md b/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md index e1e652591f6..1f469d58d81 100644 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md +++ b/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md @@ -35,7 +35,7 @@ Example: ```yaml exporters: - otlp: + otlp_grpc: endpoint: otelcol2:55690 auth: authenticator: some-authenticator-extension diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/config.schema.yaml b/vendor/go.opentelemetry.io/collector/config/configgrpc/config.schema.yaml new file mode 100644 index 00000000000..0419d72486f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/configgrpc/config.schema.yaml @@ -0,0 +1,142 @@ +$defs: + client_config: + description: ClientConfig defines common settings for a gRPC client configuration. + type: object + properties: + auth: + description: Auth configuration for outgoing RPCs. + x-optional: true + $ref: go.opentelemetry.io/collector/config/configauth.config + authority: + description: WithAuthority parameter configures client to rewrite ":authority" header (godoc.org/google.golang.org/grpc#WithAuthority) + type: string + balancer_name: + description: Sets the balancer in grpclb_policy to discover the servers. Default is pick_first. https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md + type: string + compression: + description: The compression key for supported compression types within collector. + $ref: go.opentelemetry.io/collector/config/configcompression.type + endpoint: + description: The target to which the exporter is going to send traces or metrics, using the gRPC protocol. The valid syntax is described at https://github.com/grpc/grpc/blob/master/doc/naming.md. + type: string + headers: + description: The headers associated with gRPC requests. + $ref: go.opentelemetry.io/collector/config/configopaque.map_list + keepalive: + description: The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams. (https://godoc.org/google.golang.org/grpc#WithKeepaliveParams). + x-optional: true + $ref: keepalive_client_config + middlewares: + description: Middlewares for the gRPC client. + type: array + items: + $ref: go.opentelemetry.io/collector/config/configmiddleware.config + read_buffer_size: + description: ReadBufferSize for gRPC client. See grpc.WithReadBufferSize. (https://godoc.org/google.golang.org/grpc#WithReadBufferSize). + type: integer + tls: + description: TLS struct exposes TLS client configuration. + $ref: go.opentelemetry.io/collector/config/configtls.client_config + wait_for_ready: + description: WaitForReady parameter configures client to wait for ready state before sending data. (https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md) + type: boolean + write_buffer_size: + description: WriteBufferSize for the gRPC client. See grpc.WithWriteBufferSize.
(https://godoc.org/google.golang.org/grpc#WithWriteBufferSize). + type: integer + keepalive_client_config: + description: 'KeepaliveClientConfig exposes the keepalive.ClientParameters to be used by the exporter. Refer to the original data-structure for the meaning of each parameter: https://godoc.org/google.golang.org/grpc/keepalive#ClientParameters' + type: object + properties: + permit_without_stream: + type: boolean + time: + type: string + x-customType: time.Duration + format: duration + timeout: + type: string + x-customType: time.Duration + format: duration + keepalive_enforcement_policy: + description: KeepaliveEnforcementPolicy allow configuration of the keepalive.EnforcementPolicy. The same default values as keepalive.EnforcementPolicy are applicable and get applied by the server. See https://godoc.org/google.golang.org/grpc/keepalive#EnforcementPolicy for details. + type: object + properties: + min_time: + type: string + x-customType: time.Duration + format: duration + permit_without_stream: + type: boolean + keepalive_server_config: + description: KeepaliveServerConfig is the configuration for keepalive. + type: object + properties: + enforcement_policy: + x-optional: true + $ref: keepalive_enforcement_policy + server_parameters: + x-optional: true + $ref: keepalive_server_parameters + keepalive_server_parameters: + description: KeepaliveServerParameters allow configuration of the keepalive.ServerParameters. The same default values as keepalive.ServerParameters are applicable and get applied by the server. See https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters for details. + type: object + properties: + max_connection_age: + type: string + x-customType: time.Duration + format: duration + max_connection_age_grace: + type: string + x-customType: time.Duration + format: duration + max_connection_idle: + type: string + x-customType: time.Duration + format: duration + time: + type: string + x-customType: time.Duration + format: duration + timeout: + type: string + x-customType: time.Duration + format: duration + server_config: + description: ServerConfig defines common settings for a gRPC server configuration. + type: object + properties: + auth: + description: Auth for this receiver + x-optional: true + $ref: go.opentelemetry.io/collector/config/configauth.config + include_metadata: + description: Include propagates the incoming connection's metadata to downstream consumers. + type: boolean + keepalive: + description: Keepalive anchor for all the settings related to keepalive. + x-optional: true + $ref: keepalive_server_config + max_concurrent_streams: + description: MaxConcurrentStreams sets the limit on the number of concurrent streams to each ServerTransport. It has effect only for streaming RPCs. + type: integer + x-customType: uint32 + max_recv_msg_size_mib: + description: MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server. + type: integer + middlewares: + description: Middlewares for the gRPC server. + type: array + items: + $ref: go.opentelemetry.io/collector/config/configmiddleware.config + read_buffer_size: + description: ReadBufferSize for gRPC server. See grpc.ReadBufferSize. (https://godoc.org/google.golang.org/grpc#ReadBufferSize). + type: integer + tls: + description: Configures the protocol to use TLS. The default value is nil, which will cause the protocol to not use TLS. 
+ x-optional: true + $ref: go.opentelemetry.io/collector/config/configtls.server_config + write_buffer_size: + description: WriteBufferSize for gRPC server. See grpc.WriteBufferSize. (https://godoc.org/google.golang.org/grpc#WriteBufferSize). + type: integer + allOf: + - $ref: go.opentelemetry.io/collector/config/confignet.addr_config diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go b/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go index 1d329eacaab..09231795209 100644 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go +++ b/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go @@ -9,6 +9,9 @@ import ( "errors" "fmt" "math" + "net" + "regexp" + "strconv" "strings" "time" @@ -95,7 +98,7 @@ type ClientConfig struct { WaitForReady bool `mapstructure:"wait_for_ready,omitempty"` // The headers associated with gRPC requests. - Headers map[string]configopaque.String `mapstructure:"headers,omitempty"` + Headers configopaque.MapList `mapstructure:"headers,omitempty"` // Sets the balancer in grpclb_policy to discover the servers. Default is pick_first. // https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md @@ -224,6 +227,24 @@ func NewDefaultServerConfig() ServerConfig { } func (cc *ClientConfig) Validate() error { + if after, ok := strings.CutPrefix(cc.Endpoint, "unix://"); ok { + if after == "" { + return errors.New("unix socket path cannot be empty") + } + return nil + } + + if endpoint := cc.sanitizedEndpoint(); endpoint != "" { + // Validate that the port is in the address + _, port, err := net.SplitHostPort(endpoint) + if err != nil { + return err + } + if _, err := strconv.Atoi(port); err != nil { + return fmt.Errorf(`invalid port "%v"`, port) + } + } + if cc.BalancerName != "" { if balancer.Get(cc.BalancerName) == nil { return fmt.Errorf("invalid balancer_name: %s", cc.BalancerName) @@ -240,6 +261,9 @@ func (cc *ClientConfig) sanitizedEndpoint() string { return strings.TrimPrefix(cc.Endpoint, "http://") case cc.isSchemeHTTPS(): return strings.TrimPrefix(cc.Endpoint, "https://") + case strings.HasPrefix(cc.Endpoint, "dns://"): + r := regexp.MustCompile(`^dns:///?`) + return r.ReplaceAllString(cc.Endpoint, "") default: return cc.Endpoint } @@ -272,24 +296,36 @@ func (grpcDialOptionWrapper) isToClientConnOption() {} // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking // dial, use the WithGrpcDialOption(grpc.WithBlock()) option. +// +// To allow the configuration to reference middleware or authentication extensions, +// the `extensions` argument should be the output of `host.GetExtensions()`. +// It may also be `nil` in tests where no such extension is expected to be used. func (cc *ClientConfig) ToClientConn( ctx context.Context, - host component.Host, + extensions map[component.ID]component.Component, settings component.TelemetrySettings, extraOpts ...ToClientConnOption, ) (*grpc.ClientConn, error) { - grpcOpts, err := cc.getGrpcDialOptions(ctx, host, settings, extraOpts) + grpcOpts, err := cc.getGrpcDialOptions(ctx, extensions, settings, extraOpts) + if err != nil { + return nil, err + } + conn, err := grpc.NewClient(cc.sanitizedEndpoint(), grpcOpts...) if err != nil { return nil, err } - //nolint:staticcheck // SA1019 see https://github.com/open-telemetry/opentelemetry-collector/pull/11575 - return grpc.DialContext(ctx, cc.sanitizedEndpoint(), grpcOpts...) 
+ + // Initiate connection to match the previous behavior of DialContext + // This ensures the connection is established eagerly rather than lazily + conn.Connect() + + return conn, nil } func (cc *ClientConfig) addHeadersIfAbsent(ctx context.Context) context.Context { kv := make([]string, 0, 2*len(cc.Headers)) existingMd, _ := metadata.FromOutgoingContext(ctx) - for k, v := range cc.Headers { + for k, v := range cc.Headers.Iter { if len(existingMd.Get(k)) == 0 { kv = append(kv, k, string(v)) } @@ -299,7 +335,7 @@ func (cc *ClientConfig) addHeadersIfAbsent(ctx context.Context) context.Context func (cc *ClientConfig) getGrpcDialOptions( ctx context.Context, - host component.Host, + extensions map[component.ID]component.Component, settings component.TelemetrySettings, extraOpts []ToClientConnOption, ) ([]grpc.DialOption, error) { @@ -343,11 +379,11 @@ func (cc *ClientConfig) getGrpcDialOptions( } if cc.Auth.HasValue() { - if host.GetExtensions() == nil { - return nil, errors.New("no extensions configuration available") + if extensions == nil { + return nil, errors.New("authentication was configured but this component or its host does not support extensions") } - grpcAuthenticator, cerr := cc.Auth.Get().GetGRPCClientAuthenticator(ctx, host.GetExtensions()) + grpcAuthenticator, cerr := cc.Auth.Get().GetGRPCClientAuthenticator(ctx, extensions) if cerr != nil { return nil, cerr } @@ -388,8 +424,11 @@ func (cc *ClientConfig) getGrpcDialOptions( } // Apply middleware options. Note: OpenTelemetry could be registered as an extension. + if len(cc.Middlewares) > 0 && extensions == nil { + return nil, errors.New("middlewares were configured but this component or its host does not support extensions") + } for _, middleware := range cc.Middlewares { - middlewareOptions, err := middleware.GetGRPCClientOptions(ctx, host.GetExtensions()) + middlewareOptions, err := middleware.GetGRPCClientOptions(ctx, extensions) if err != nil { return nil, fmt.Errorf("failed to get gRPC client options from middleware: %w", err) } @@ -437,13 +476,17 @@ func WithGrpcServerOption(opt grpc.ServerOption) ToServerOption { func (grpcServerOptionWrapper) isToServerOption() {} // ToServer returns a [grpc.Server] for the configuration. +// +// To allow the configuration to reference middleware or authentication extensions, +// the `extensions` argument should be the output of `host.GetExtensions()`. +// It may also be `nil` in tests where no such extension is expected to be used. func (sc *ServerConfig) ToServer( ctx context.Context, - host component.Host, + extensions map[component.ID]component.Component, settings component.TelemetrySettings, extraOpts ...ToServerOption, ) (*grpc.Server, error) { - grpcOpts, err := sc.getGrpcServerOptions(ctx, host, settings, extraOpts) + grpcOpts, err := sc.getGrpcServerOptions(ctx, extensions, settings, extraOpts) if err != nil { return nil, err } @@ -452,7 +495,7 @@ func (sc *ServerConfig) ToServer( func (sc *ServerConfig) getGrpcServerOptions( ctx context.Context, - host component.Host, + extensions map[component.ID]component.Component, settings component.TelemetrySettings, extraOpts []ToServerOption, ) ([]grpc.ServerOption, error) { @@ -514,8 +557,12 @@ func (sc *ServerConfig) getGrpcServerOptions( var uInterceptors []grpc.UnaryServerInterceptor var sInterceptors []grpc.StreamServerInterceptor + // Add client info first, before auth. 
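The signature change above, from component.Host to map[component.ID]component.Component, touches every caller of ToClientConn and ToServer. A sketch of the call-site migration; startExporter, cfg, and set are illustrative names:

```go
package example

import (
	"context"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/configgrpc"
)

// startExporter shows only the call-site change; error handling is elided.
func startExporter(ctx context.Context, host component.Host, set component.TelemetrySettings, cfg *configgrpc.ClientConfig) (*grpc.ClientConn, error) {
	// Before this change: cfg.ToClientConn(ctx, host, set)
	// After: pass host.GetExtensions(); nil is acceptable in tests that
	// reference no auth or middleware extensions.
	return cfg.ToClientConn(ctx, host.GetExtensions(), set)
}
```

Note the new body also swaps the deprecated grpc.DialContext for grpc.NewClient followed by an explicit Connect(), preserving the old eager-dial behavior.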
+ uInterceptors = append(uInterceptors, enhanceWithClientInformation(sc.IncludeMetadata)) + sInterceptors = append(sInterceptors, enhanceStreamWithClientInformation(sc.IncludeMetadata)) //nolint:contextcheck // context already handled + if sc.Auth.HasValue() { - authenticator, err := sc.Auth.Get().GetServerAuthenticator(ctx, host.GetExtensions()) + authenticator, err := sc.Auth.Get().GetServerAuthenticator(ctx, extensions) if err != nil { return nil, err } @@ -531,15 +578,11 @@ func (sc *ServerConfig) getGrpcServerOptions( } // Enable OpenTelemetry observability plugin. - - uInterceptors = append(uInterceptors, enhanceWithClientInformation(sc.IncludeMetadata)) - sInterceptors = append(sInterceptors, enhanceStreamWithClientInformation(sc.IncludeMetadata)) //nolint:contextcheck // context already handled - opts = append(opts, grpc.StatsHandler(otelgrpc.NewServerHandler(otelOpts...)), grpc.ChainUnaryInterceptor(uInterceptors...), grpc.ChainStreamInterceptor(sInterceptors...)) // Apply middleware options. Note: OpenTelemetry could be registered as an extension. for _, middleware := range sc.Middlewares { - middlewareOptions, err := middleware.GetGRPCServerOptions(ctx, host.GetExtensions()) + middlewareOptions, err := middleware.GetGRPCServerOptions(ctx, extensions) if err != nil { return nil, fmt.Errorf("failed to get gRPC server options from middleware: %w", err) } @@ -611,6 +654,10 @@ func authUnaryServerInterceptor(server extensionauth.Server) grpc.UnaryServerInt ctx, err := server.Authenticate(ctx, headers) if err != nil { + if s, ok := status.FromError(err); ok { + return nil, s.Err() + } + return nil, status.Error(codes.Unauthenticated, err.Error()) } @@ -628,6 +675,10 @@ func authStreamServerInterceptor(server extensionauth.Server) grpc.StreamServerI ctx, err := server.Authenticate(ctx, headers) if err != nil { + if s, ok := status.FromError(err); ok { + return s.Err() + } + return status.Error(codes.Unauthenticated, err.Error()) } diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/README.md b/vendor/go.opentelemetry.io/collector/config/confighttp/README.md index 08987e61161..ba13f30fcfa 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/README.md +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/README.md @@ -15,6 +15,7 @@ configuration. For more information, see [configtls README](../configtls/README.md). - `endpoint`: address:port +- `proxy_url`: Proxy URL to use for HTTP requests - [`tls`](../configtls/README.md) - [`headers`](https://pkg.go.dev/net/http#Request): name/value pairs added to the HTTP request headers - certain headers such as Content-Length and Connection are automatically written when needed and values in Header may be ignored. @@ -67,7 +68,7 @@ Example: ```yaml exporter: - otlphttp: + otlp_http: endpoint: otelcol2:55690 auth: authenticator: some-authenticator-extension @@ -106,9 +107,17 @@ will not be enabled. header, allowing clients to cache the response to CORS preflight requests. If not set, browsers use a default of 5 seconds. - `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) +- `transport`: The transport protocol to use. Defaults to `tcp`. See the [confignet README](../confignet/README.md) for more information. - `max_request_body_size`: configures the maximum allowed body size in bytes for a single request. Default: `20971520` (20MiB) +- `include_metadata`: propagates the client metadata from the incoming requests to the downstream consumers. 
Default: `false` +- `response_headers`: Additional headers attached to each HTTP response sent to the client. Header values are opaque since they may be sensitive - `compression_algorithms`: configures the list of compression algorithms the server can accept. Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate", "lz4"] - `x-snappy-framed` can be used if feature gate `confighttp.snappyFramed` is enabled. +- `read_timeout`: maximum duration for reading the entire request, including the body. A zero or negative value means there will be no timeout. Default: `0` (no timeout) +- `read_header_timeout`: amount of time allowed to read request headers. If zero, the value of `read_timeout` is used. If both are zero, there is no timeout. Default: `1m` +- `write_timeout`: maximum duration before timing out writes of the response. A zero or negative value means there will be no timeout. Default: `30s` +- `idle_timeout`: maximum amount of time to wait for the next request when keep-alives are enabled. If zero, the value of `read_timeout` is used. If both are zero, there is no timeout. Default: `1m` +- `keep_alives_enabled`: controls whether HTTP keep-alives are enabled. Default: `true` - [`tls`](../configtls/README.md) - [`auth`](../configauth/README.md) - `request_params`: a list of query parameter names to add to the auth context, along with the HTTP headers diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/client.go b/vendor/go.opentelemetry.io/collector/config/confighttp/client.go index 41e5dd1b53d..ec16cc293d5 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/client.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/client.go @@ -56,7 +56,7 @@ type ClientConfig struct { // Additional headers attached to each HTTP request sent by the client. // Existing header values are overwritten if collision happens. // Header values are opaque since they may be sensitive. - Headers map[string]configopaque.String `mapstructure:"headers,omitempty"` + Headers configopaque.MapList `mapstructure:"headers,omitempty"` // Auth configuration for outgoing HTTP calls. Auth configoptional.Optional[configauth.Config] `mapstructure:"auth,omitempty"` @@ -101,7 +101,7 @@ type ClientConfig struct { // If not set or set to 0, it defaults to 15s. HTTP2PingTimeout time.Duration `mapstructure:"http2_ping_timeout,omitempty"` // Cookies configures the cookie management of the HTTP client. - Cookies CookiesConfig `mapstructure:"cookies,omitempty"` + Cookies configoptional.Optional[CookiesConfig] `mapstructure:"cookies,omitempty"` // Enabling ForceAttemptHTTP2 forces the HTTP transport to use the HTTP/2 protocol. // By default, this is set to true. @@ -116,9 +116,7 @@ type ClientConfig struct { // CookiesConfig defines the configuration of the HTTP client regarding cookies served by the server. type CookiesConfig struct { - // Enabled if true, cookies from HTTP responses will be reused in further HTTP requests with the same server. - Enabled bool `mapstructure:"enabled,omitempty"` - _ struct{} + _ struct{} } // NewDefaultClientConfig returns ClientConfig type object with @@ -130,7 +128,6 @@ func NewDefaultClientConfig() ClientConfig { defaultTransport := http.DefaultTransport.(*http.Transport) return ClientConfig{ - Headers: map[string]configopaque.String{}, MaxIdleConns: defaultTransport.MaxIdleConns, IdleConnTimeout: defaultTransport.IdleConnTimeout, ForceAttemptHTTP2: true, @@ -154,7 +151,11 @@ type ToClientOption interface { } // ToClient creates an HTTP client. 
-func (cc *ClientConfig) ToClient(ctx context.Context, host component.Host, settings component.TelemetrySettings, _ ...ToClientOption) (*http.Client, error) { +// +// To allow the configuration to reference middleware or authentication extensions, +// the `extensions` argument should be the output of `host.GetExtensions()`. +// It may also be `nil` in tests where no such extension is expected to be used. +func (cc *ClientConfig) ToClient(ctx context.Context, extensions map[component.ID]component.Component, settings component.TelemetrySettings, _ ...ToClientOption) (*http.Client, error) { tlsCfg, err := cc.TLS.LoadTLSConfig(ctx) if err != nil { return nil, err @@ -200,9 +201,12 @@ func (cc *ClientConfig) ToClient(ctx context.Context, host component.Host, setti // Apply middlewares in reverse order so they execute in // forward order. The first middleware runs after authentication. + if len(cc.Middlewares) > 0 && extensions == nil { + return nil, errors.New("middlewares were configured but this component or its host does not support extensions") + } for i := len(cc.Middlewares) - 1; i >= 0; i-- { var wrapper func(http.RoundTripper) (http.RoundTripper, error) - wrapper, err = cc.Middlewares[i].GetHTTPClientRoundTripper(ctx, host.GetExtensions()) + wrapper, err = cc.Middlewares[i].GetHTTPClientRoundTripper(ctx, extensions) // If we failed to get the middleware if err != nil { return nil, err @@ -218,13 +222,12 @@ func (cc *ClientConfig) ToClient(ctx context.Context, host component.Host, setti // request signing-based auth mechanisms operate after compression // and header middleware modifies the request if cc.Auth.HasValue() { - ext := host.GetExtensions() - if ext == nil { - return nil, errors.New("extensions configuration not found") + if extensions == nil { + return nil, errors.New("authentication was configured but this component or its host does not support extensions") } auth := cc.Auth.Get() - httpCustomAuthRoundTripper, aerr := auth.GetHTTPClientAuthenticator(ctx, ext) + httpCustomAuthRoundTripper, aerr := auth.GetHTTPClientAuthenticator(ctx, extensions) if aerr != nil { return nil, aerr } @@ -266,7 +269,7 @@ func (cc *ClientConfig) ToClient(ctx context.Context, host component.Host, setti } var jar http.CookieJar - if cc.Cookies.Enabled { + if cc.Cookies.HasValue() { jar, err = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) if err != nil { return nil, err @@ -283,18 +286,18 @@ func (cc *ClientConfig) ToClient(ctx context.Context, host component.Host, setti // Custom RoundTripper that adds headers. type headerRoundTripper struct { transport http.RoundTripper - headers map[string]configopaque.String + headers configopaque.MapList } // RoundTrip is a custom RoundTripper that adds headers to the request. 
func (interceptor *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { // Set Host header if provided - hostHeader, found := interceptor.headers["Host"] + hostHeader, found := interceptor.headers.Get("Host") if found && hostHeader != "" { // `Host` field should be set to override default `Host` header value which is Endpoint req.Host = string(hostHeader) } - for k, v := range interceptor.headers { + for k, v := range interceptor.headers.Iter { req.Header.Set(k, string(v)) } diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go b/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go index 02e6fd73721..62f1ff6d7d2 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go @@ -278,7 +278,11 @@ func (d *decompressor) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (d *decompressor) newBodyReader(r *http.Request) (io.ReadCloser, error) { + if len(d.decoders) == 0 { + return nil, nil // Signal: don't replace r.Body + } encoding := r.Header.Get(headerContentEncoding) + decoder, ok := d.decoders[encoding] if !ok { return nil, fmt.Errorf("unsupported %s: %s", headerContentEncoding, encoding) diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/config.schema.yaml b/vendor/go.opentelemetry.io/collector/config/confighttp/config.schema.yaml new file mode 100644 index 00000000000..185db12e530 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/config.schema.yaml @@ -0,0 +1,166 @@ +$defs: + auth_config: + type: object + properties: + request_params: + description: RequestParameters is a list of parameters that should be extracted from the request and added to the context. When a parameter is found in both the query string and the header, the value from the query string will be used. + type: array + items: + type: string + allOf: + - $ref: go.opentelemetry.io/collector/config/configauth.config + client_config: + description: ClientConfig defines settings for creating an HTTP client. + type: object + properties: + auth: + description: Auth configuration for outgoing HTTP calls. + x-optional: true + $ref: go.opentelemetry.io/collector/config/configauth.config + compression: + description: The compression key for supported compression types within collector. + $ref: go.opentelemetry.io/collector/config/configcompression.type + compression_params: + description: Advanced configuration options for the Compression + $ref: go.opentelemetry.io/collector/config/configcompression.compression_params + cookies: + description: Cookies configures the cookie management of the HTTP client. + x-optional: true + $ref: cookies_config + disable_keep_alives: + description: 'DisableKeepAlives, if true, disables HTTP keep-alives and will only use the connection to the server for a single HTTP request. WARNING: enabling this option can result in significant overhead establishing a new HTTP(S) connection for every request. Before enabling this option please consider whether changes to idle connection settings can achieve your goal.' + type: boolean + endpoint: + description: 'The target URL to send data to (e.g.: http://some.url:9411/v1/traces).' + type: string + force_attempt_http2: + description: 'Enabling ForceAttemptHTTP2 forces the HTTP transport to use the HTTP/2 protocol. By default, this is set to true. NOTE: HTTP/2 does not support settings such as MaxConnsPerHost, MaxIdleConnsPerHost and MaxIdleConns.' 
+ type: boolean + headers: + description: Additional headers attached to each HTTP request sent by the client. Existing header values are overwritten if collision happens. Header values are opaque since they may be sensitive. + $ref: go.opentelemetry.io/collector/config/configopaque.map_list + http2_ping_timeout: + description: HTTP2PingTimeout if there's no response to the ping within the configured value, the connection will be closed. If not set or set to 0, it defaults to 15s. + type: string + x-customType: time.Duration + format: duration + http2_read_idle_timeout: + description: This is needed in case you run into https://github.com/golang/go/issues/59690 https://github.com/golang/go/issues/36026 HTTP2ReadIdleTimeout if the connection has been idle for the configured value send a ping frame for health check 0s means no health check will be performed. + type: string + x-customType: time.Duration + format: duration + idle_conn_timeout: + description: IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself. By default, it is set to 90 seconds. + type: string + x-customType: time.Duration + format: duration + max_conns_per_host: + description: MaxConnsPerHost limits the total number of connections per host, including connections in the dialing, active, and idle states. Default is 0 (unlimited). + type: integer + max_idle_conns: + description: MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open. By default, it is set to 100. Zero means no limit. + type: integer + max_idle_conns_per_host: + description: MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open. If zero, [net/http.DefaultMaxIdleConnsPerHost] is used. + type: integer + middlewares: + description: Middlewares are used to add custom functionality to the HTTP client. Middleware handlers are called in the order they appear in this list, with the first middleware becoming the outermost handler. + type: array + items: + $ref: go.opentelemetry.io/collector/config/configmiddleware.config + proxy_url: + description: ProxyURL setting for the collector + type: string + read_buffer_size: + description: ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize. Default is 0. + type: integer + timeout: + description: Timeout parameter configures `http.Client.Timeout`. Default is 0 (unlimited). + type: string + x-customType: time.Duration + format: duration + tls: + description: TLS struct exposes TLS client configuration. + $ref: go.opentelemetry.io/collector/config/configtls.client_config + write_buffer_size: + description: WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize. Default is 0. + type: integer + cors_config: + description: CORSConfig configures a receiver for HTTP cross-origin resource sharing (CORS). See the underlying https://github.com/rs/cors package for details. + type: object + properties: + allowed_headers: + description: AllowedHeaders sets what headers will be allowed in CORS requests. The Accept, Accept-Language, Content-Type, and Content-Language headers are implicitly allowed. If no headers are listed, X-Requested-With will also be accepted by default. Include "*" to allow any request header. + type: array + items: + type: string + allowed_origins: + description: AllowedOrigins sets the allowed values of the Origin header for HTTP/JSON requests to an OTLP receiver. 
An origin may contain a wildcard (*) to replace 0 or more characters (e.g., "http://*.domain.com", or "*" to allow any origin). + type: array + items: + type: string + max_age: + description: MaxAge sets the value of the Access-Control-Max-Age response header. Set it to the number of seconds that browsers should cache a CORS preflight response for. + type: integer + server_config: + description: ServerConfig defines settings for creating an HTTP server. + type: object + properties: + auth: + description: Auth for this receiver + x-optional: true + $ref: auth_config + compression_algorithms: + description: 'CompressionAlgorithms configures the list of compression algorithms the server can accept. Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate"]' + type: array + items: + type: string + cors: + description: CORS configures the server for HTTP cross-origin resource sharing (CORS). + x-optional: true + $ref: cors_config + endpoint: + description: Endpoint configures the listening address for the server. + type: string + idle_timeout: + description: IdleTimeout is the maximum amount of time to wait for the next request when keep-alives are enabled. If IdleTimeout is zero, the value of ReadTimeout is used. If both are zero, there is no timeout. + type: string + x-customType: time.Duration + format: duration + include_metadata: + description: IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers + type: boolean + keep_alives_enabled: + description: KeepAlivesEnabled controls whether HTTP keep-alives are enabled. By default, keep-alives are always enabled. Only very resource-constrained environments should disable them. + type: boolean + max_request_body_size: + description: 'MaxRequestBodySize sets the maximum request body size in bytes. Default: 20MiB.' + type: integer + x-customType: int64 + middlewares: + description: Middlewares are used to add custom functionality to the HTTP server. Middleware handlers are called in the order they appear in this list, with the first middleware becoming the outermost handler. + type: array + items: + $ref: go.opentelemetry.io/collector/config/configmiddleware.config + read_header_timeout: + description: ReadHeaderTimeout is the amount of time allowed to read request headers. The connection's read deadline is reset after reading the headers and the Handler can decide what is considered too slow for the body. If ReadHeaderTimeout is zero, the value of ReadTimeout is used. If both are zero, there is no timeout. + type: string + x-customType: time.Duration + format: duration + read_timeout: + description: ReadTimeout is the maximum duration for reading the entire request, including the body. A zero or negative value means there will be no timeout. Because ReadTimeout does not let Handlers make per-request decisions on each request body's acceptable deadline or upload rate, most users will prefer to use ReadHeaderTimeout. It is valid to use them both. + type: string + x-customType: time.Duration + format: duration + response_headers: + description: Additional headers attached to each HTTP response sent to the client. Header values are opaque since they may be sensitive. + $ref: go.opentelemetry.io/collector/config/configopaque.map_list + tls: + description: TLS struct exposes TLS server configuration. + x-optional: true + $ref: go.opentelemetry.io/collector/config/configtls.server_config + write_timeout: + description: WriteTimeout is the maximum duration before timing out writes of the response.
It is reset whenever a new request's header is read. Like ReadTimeout, it does not let Handlers make decisions on a per-request basis. A zero or negative value means there will be no timeout. + type: string + x-customType: time.Duration + format: duration diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/server.go b/vendor/go.opentelemetry.io/collector/config/confighttp/server.go index f9da9a53768..e6dec5b3ec0 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/server.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/server.go @@ -6,9 +6,11 @@ package confighttp // import "go.opentelemetry.io/collector/config/confighttp" import ( "context" "crypto/tls" + "errors" "io" "net" "net/http" + "strings" "time" "github.com/rs/cors" @@ -22,6 +24,7 @@ import ( "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/confighttp/internal" "go.opentelemetry.io/collector/config/configmiddleware" + "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configoptional" "go.opentelemetry.io/collector/config/configtls" @@ -32,10 +35,13 @@ const defaultMaxRequestBodySize = 20 * 1024 * 1024 // 20MiB // ServerConfig defines settings for creating an HTTP server. type ServerConfig struct { - // Endpoint configures the listening address for the server. - Endpoint string `mapstructure:"endpoint,omitempty"` + // NetAddr holds configuration for the network listener. + // + // Transport defaults to "tcp" if unspecified, and only + // "tcp", "tcp4", "tcp6", and "unix" are valid options. + NetAddr confignet.AddrConfig `mapstructure:",squash"` - // TLS struct exposes TLS client configuration. + // TLS struct exposes TLS server configuration. TLS configoptional.Optional[configtls.ServerConfig] `mapstructure:"tls"` // CORS configures the server for HTTP cross-origin resource sharing (CORS). @@ -52,7 +58,7 @@ type ServerConfig struct { // Additional headers attached to each HTTP response sent to the client. // Header values are opaque since they may be sensitive. - ResponseHeaders map[string]configopaque.String `mapstructure:"response_headers"` + ResponseHeaders configopaque.MapList `mapstructure:"response_headers,omitempty"` // CompressionAlgorithms configures the list of compression algorithms the server can accept. Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate"] CompressionAlgorithms []string `mapstructure:"compression_algorithms,omitempty"` @@ -101,8 +107,12 @@ type ServerConfig struct { // NewDefaultServerConfig returns ServerConfig type object with default values. // We encourage to use this function to create an object of ServerConfig. func NewDefaultServerConfig() ServerConfig { + netAddr := confignet.NewDefaultAddrConfig() + // We typically want to create a TCP server and listen over a network. + netAddr.Transport = confignet.TransportTypeTCP + return ServerConfig{ - ResponseHeaders: map[string]configopaque.String{}, + NetAddr: netAddr, WriteTimeout: 30 * time.Second, ReadHeaderTimeout: 1 * time.Minute, IdleTimeout: 1 * time.Minute, @@ -123,7 +133,7 @@ type AuthConfig struct { // ToListener creates a net.Listener. 
func (sc *ServerConfig) ToListener(ctx context.Context) (net.Listener, error) { - listener, err := net.Listen("tcp", sc.Endpoint) + listener, err := sc.NetAddr.Listen(ctx) if err != nil { return nil, err } @@ -169,7 +179,11 @@ func WithDecoder(key string, dec func(body io.ReadCloser) (io.ReadCloser, error) } // ToServer creates an http.Server from settings object. -func (sc *ServerConfig) ToServer(ctx context.Context, host component.Host, settings component.TelemetrySettings, handler http.Handler, opts ...ToServerOption) (*http.Server, error) { +// +// To allow the configuration to reference middleware or authentication extensions, +// the `extensions` argument should be the output of `host.GetExtensions()`. +// It may also be `nil` in tests where no such extension is expected to be used. +func (sc *ServerConfig) ToServer(ctx context.Context, extensions map[component.ID]component.Component, settings component.TelemetrySettings, handler http.Handler, opts ...ToServerOption) (*http.Server, error) { serverOpts := &toServerOptions{} serverOpts.Apply(opts...) @@ -184,8 +198,11 @@ func (sc *ServerConfig) ToServer(ctx context.Context, host component.Host, setti // Apply middlewares in reverse order so they execute in // forward order. The first middleware runs after // decompression, below, preceded by Auth, CORS, etc. + if len(sc.Middlewares) > 0 && extensions == nil { + return nil, errors.New("middlewares were configured but this component or its host does not support extensions") + } for i := len(sc.Middlewares) - 1; i >= 0; i-- { - wrapper, err := sc.Middlewares[i].GetHTTPServerHandler(ctx, host.GetExtensions()) + wrapper, err := sc.Middlewares[i].GetHTTPServerHandler(ctx, extensions) // If we failed to get the middleware if err != nil { return nil, err @@ -210,8 +227,12 @@ func (sc *ServerConfig) ToServer(ctx context.Context, host component.Host, setti } if sc.Auth.HasValue() { + if extensions == nil { + return nil, errors.New("authentication was configured but this component or its host does not support extensions") + } + auth := sc.Auth.Get() - server, err := auth.GetServerAuthenticator(ctx, host.GetExtensions()) + server, err := auth.GetServerAuthenticator(ctx, extensions) if err != nil { return nil, err } @@ -246,13 +267,18 @@ func (sc *ServerConfig) ToServer(ctx context.Context, host component.Host, setti // // "HTTP span names SHOULD be {method} {target} if there is a (low-cardinality) target available. // If there is no (low-cardinality) {target} available, HTTP span names SHOULD be {method}. - // ... + // + // The {method} MUST be {http.request.method} if the method represents the original method known + // to the instrumentation. In other cases (when {http.request.method} is set to _OTHER), + // {method} MUST be HTTP. + // // Instrumentation MUST NOT default to using URI path as a {target}." 
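Since ServerConfig now embeds confignet.AddrConfig (squashed) instead of a bare Endpoint string, the listener transport becomes configurable; per the hunk above only tcp, tcp4, tcp6, and unix are valid here, with tcp remaining the default. A sketch of configuring a Unix domain socket listener, assuming a hypothetical socket path:

```go
package example

import (
	"go.opentelemetry.io/collector/config/confighttp"
	"go.opentelemetry.io/collector/config/confignet"
)

// unixSocketServer sketches the new knob: NewDefaultServerConfig still
// defaults Transport to "tcp", but a component can now listen on a Unix
// domain socket instead of a TCP endpoint.
func unixSocketServer() confighttp.ServerConfig {
	sc := confighttp.NewDefaultServerConfig()
	sc.NetAddr = confignet.AddrConfig{
		Endpoint:  "/var/run/otelcol.sock", // hypothetical socket path
		Transport: confignet.TransportTypeUnix,
	}
	return sc
}
```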
// + method := standardizeHTTPMethod(r.Method, "HTTP") if r.Pattern != "" { - return r.Method + " " + r.Pattern + return method + " " + r.Pattern } - return r.Method + return method }), otelhttp.WithMeterProvider(settings.MeterProvider), }, @@ -287,11 +313,11 @@ func (sc *ServerConfig) ToServer(ctx context.Context, host component.Host, setti return server, err } -func responseHeadersHandler(handler http.Handler, headers map[string]configopaque.String) http.Handler { +func responseHeadersHandler(handler http.Handler, headers configopaque.MapList) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { h := w.Header() - for k, v := range headers { + for k, v := range headers.Iter { h.Set(k, string(v)) } @@ -358,3 +384,14 @@ func maxRequestBodySizeInterceptor(next http.Handler, maxRecvSize int64) http.Ha next.ServeHTTP(w, r) }) } + +// standardizeHTTPMethod returns an upper case HTTP method if well-known, otherwise unknown. +// Based on https://github.com/open-telemetry/opentelemetry-go-contrib/blob/1530d71edc6d40d0659187d069081b639ef1b394/instrumentation/github.com/emicklei/go-restful/otelrestful/internal/semconv/util.go#L119 +func standardizeHTTPMethod(method, unknown string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + return method + } + return unknown +} diff --git a/vendor/go.opentelemetry.io/collector/config/configmiddleware/config.yaml b/vendor/go.opentelemetry.io/collector/config/configmiddleware/config.yaml new file mode 100644 index 00000000000..13dba6c7ec9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/configmiddleware/config.yaml @@ -0,0 +1,9 @@ +$defs: + config: + description: Middleware defines the extension ID for a middleware component. + type: object + properties: + id: + description: ID specifies the name of the extension to use. + type: string + x-customType: go.opentelemetry.io/collector/component.ID diff --git a/vendor/go.opentelemetry.io/collector/config/confignet/config.yaml b/vendor/go.opentelemetry.io/collector/config/confignet/config.yaml new file mode 100644 index 00000000000..369d409d9b1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/confignet/config.yaml @@ -0,0 +1,36 @@ +$defs: + addr_config: + description: AddrConfig represents a network endpoint address. + type: object + properties: + dialer: + description: DialerConfig contains options for connecting to an address. + $ref: dialer_config + endpoint: + description: Endpoint configures the address for this network connection. For TCP and UDP networks, the address has the form "host:port". The host must be a literal IP address, or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. + type: string + transport: + description: Transport to use. Allowed protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". + $ref: transport_type + dialer_config: + description: DialerConfig contains options for connecting to an address. 
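The standardizeHTTPMethod helper above keeps otelhttp span names low-cardinality: a known method is upper-cased, anything else collapses to the fallback. A runnable demonstration, with the function body copied from the hunk:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// Copied from the server.go hunk above: unknown verbs collapse to a fallback
// so span names of the form "{method} {target}" stay low-cardinality even
// when a client sends an arbitrary method string.
func standardizeHTTPMethod(method, unknown string) string {
	method = strings.ToUpper(method)
	switch method {
	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead,
		http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
		return method
	}
	return unknown
}

func main() {
	fmt.Println(standardizeHTTPMethod("get", "HTTP"))  // GET
	fmt.Println(standardizeHTTPMethod("BREW", "HTTP")) // HTTP
}
```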
+ type: object + properties: + timeout: + description: Timeout is the maximum amount of time a dial will wait for a connect to complete. The default is no timeout. + type: string + x-customType: time.Duration + format: duration + tcp_addr_config: + description: TCPAddrConfig represents a TCP endpoint address. + type: object + properties: + dialer: + description: DialerConfig contains options for connecting to an address. + $ref: dialer_config + endpoint: + description: Endpoint configures the address for this network connection. The address has the form "host:port". The host must be a literal IP address, or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. + type: string + transport_type: + description: TransportType represents a type of network transport protocol + type: string diff --git a/vendor/go.opentelemetry.io/collector/config/configopaque/config.yaml b/vendor/go.opentelemetry.io/collector/config/configopaque/config.yaml new file mode 100644 index 00000000000..6c226d023cd --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/configopaque/config.yaml @@ -0,0 +1,17 @@ +$defs: + map_list: + description: MapList is a replacement for map[string]configopaque.String with a similar API, which can also be unmarshalled from (and is stored as) a list of name/value pairs. Pairs are assumed to have distinct names. This is checked during config validation. + type: array + items: + $ref: pair + pair: + description: Pair is an element of a MapList, and consists of a name and an opaque value. + type: object + properties: + name: + type: string + value: + $ref: string + string: + description: String alias that is marshaled and printed in an opaque way. To recover the original value, cast it to a string. + type: string diff --git a/vendor/go.opentelemetry.io/collector/config/configopaque/maplist.go b/vendor/go.opentelemetry.io/collector/config/configopaque/maplist.go new file mode 100644 index 00000000000..71a206b633f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/configopaque/maplist.go @@ -0,0 +1,116 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package configopaque // import "go.opentelemetry.io/collector/config/configopaque" + +import ( + "cmp" + "fmt" + "iter" + "slices" + + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/confmap/xconfmap" +) + +// Pair is an element of a MapList, and consists of a name and an opaque value. +type Pair struct { + Name string `mapstructure:"name"` + Value String `mapstructure:"value"` + + // prevent unkeyed literal initialization + _ struct{} +} + +// MapList is a replacement for map[string]configopaque.String with a similar API, +// which can also be unmarshalled from (and is stored as) a list of name/value pairs. +// +// Pairs are assumed to have distinct names. This is checked during config validation. +type MapList []Pair + +var _ confmap.Unmarshaler = (*MapList)(nil) + +// Unmarshal is called by the Collector when unmarshalling from a map. +// When the input config is a slice, this will be skipped, +// and mapstructure's default unmarshalling logic will be used. 
+func (ml *MapList) Unmarshal(conf *confmap.Conf) error { + var m2 map[string]String + if err := conf.Unmarshal(&m2); err != nil { + return err + } + *ml = make(MapList, 0, len(m2)) + for name, value := range m2 { + *ml = append(*ml, Pair{ + Name: name, + Value: value, + }) + } + slices.SortFunc(*ml, func(p1, p2 Pair) int { + return cmp.Compare(p1.Name, p2.Name) + }) + return nil +} + +var _ xconfmap.Validator = MapList(nil) + +func (ml MapList) Validate() error { + // Check for duplicate keys + counts := make(map[string]int, len(ml)) + for _, OpaquePair := range ml { + counts[OpaquePair.Name]++ + } + if len(counts) == len(ml) { + return nil + } + var duplicates []string + for name, cnt := range counts { + if cnt > 1 { + duplicates = append(duplicates, name) + } + } + slices.Sort(duplicates) + return fmt.Errorf("duplicate keys in map-style list: %v", duplicates) +} + +var _ iter.Seq2[string, String] = MapList(nil).Iter + +// Iter is an iterator over key/value pairs for use in for-range loops. +// It is the MapList equivalent of directly ranging over a map. +func (ml MapList) Iter(yield func(name string, value String) bool) { + for _, OpaquePair := range ml { + if !yield(OpaquePair.Name, OpaquePair.Value) { + break + } + } +} + +// Get looks up a pair's value based on its name. +// It is the MapList equivalent of `val, ok := m[key]`. +// However, it has linear time complexity. +func (ml MapList) Get(name string) (val String, ok bool) { + for _, OpaquePair := range ml { + if OpaquePair.Name == name { + return OpaquePair.Value, true + } + } + return val, false +} + +// Set sets the value corresponding to a given name. +// It is the MapList equivalent of `m[key] = val`. +// However, it has linear time complexity, +// and does not affect shallow copies. +func (ml *MapList) Set(name string, val String) { + if ml == nil { + panic("assignment to entry in nil *MapList") + } + for i, OpaquePair := range *ml { + if OpaquePair.Name == name { + *ml = slices.Clone(*ml) + (*ml)[i].Value = val + return + } + } + *ml = append(make(MapList, 0, len(*ml)+1), *ml...) + *ml = append(*ml, Pair{Name: name, Value: val}) +} diff --git a/vendor/go.opentelemetry.io/collector/config/configoptional/optional.go b/vendor/go.opentelemetry.io/collector/config/configoptional/optional.go index c950487953f..914827c5588 100644 --- a/vendor/go.opentelemetry.io/collector/config/configoptional/optional.go +++ b/vendor/go.opentelemetry.io/collector/config/configoptional/optional.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/confmap/xconfmap" + "go.opentelemetry.io/collector/featuregate" ) type flavor int @@ -26,6 +27,7 @@ const ( // It supports a third flavor for struct types: Default(defaultVal). // // For struct types, it supports unmarshaling from a configuration source. +// For struct types, it supports an 'enabled' field to explicitly disable a section. // The zero value of Optional is None. type Optional[T any] struct { // value is the value of the Optional. 
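A quick tour of the MapList API defined above, the ordered replacement for map[string]configopaque.String now used for gRPC and HTTP headers; note that Get and Set are documented as linear-time:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configopaque"
)

func main() {
	var headers configopaque.MapList
	headers.Set("User-Agent", "otelcol")     // m[k] = v equivalent
	headers.Set("Authorization", "Bearer x") // value stays opaque

	if v, ok := headers.Get("User-Agent"); ok { // val, ok := m[k] equivalent
		fmt.Println("User-Agent:", string(v)) // cast recovers the real value
	}

	for name, value := range headers.Iter { // range-over-func iteration
		fmt.Println(name, "=", value) // prints opaquely, not the secret
	}
}
```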
@@ -165,6 +167,17 @@ func (o *Optional[T]) GetOrInsertDefault() *T { var _ confmap.Unmarshaler = (*Optional[any])(nil) +var ( + addEnabledFieldFeatureGateID = "configoptional.AddEnabledField" + addEnabledFieldFeatureGate = featuregate.GlobalRegistry().MustRegister( + addEnabledFieldFeatureGateID, + featuregate.StageBeta, + featuregate.WithRegisterFromVersion("v0.138.0"), + featuregate.WithRegisterDescription("Allows optional fields to be toggled via an 'enabled' field."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/14021"), + ) +) + // Unmarshal the configuration into the Optional value. // // The behavior of this method depends on the state of the Optional: @@ -173,6 +186,11 @@ var _ confmap.Unmarshaler = (*Optional[any])(nil) // - Default[T](val), equivalent to unmarshaling into a field of type T with base value val, // using val without overrides from the configuration if the configuration is nil. // +// (Under the `configoptional.AddEnabledField` feature gate) +// If the configuration contains an 'enabled' field: +// - if enabled is true: the Optional becomes Some after unmarshaling. +// - if enabled is false: the Optional becomes None regardless of other configuration values. +// // T must be dereferenceable to a type with struct kind and not have an 'enabled' field. // Scalar values are not supported. func (o *Optional[T]) Unmarshal(conf *confmap.Conf) error { @@ -186,11 +204,29 @@ func (o *Optional[T]) Unmarshal(conf *confmap.Conf) error { return nil } - if err := conf.Unmarshal(&o.value); err != nil { + isEnabled := true + if addEnabledFieldFeatureGate.IsEnabled() && conf.IsSet("enabled") { + enabled := conf.Get("enabled") + conf.Delete("enabled") + var ok bool + if isEnabled, ok = enabled.(bool); !ok { + return fmt.Errorf("unexpected type %T for 'enabled': got '%v', expected 'true' or 'false'", enabled, enabled) + } + } + + if err := conf.Unmarshal(&o.value, xconfmap.WithForceUnmarshaler()); err != nil { return err } - o.flavor = someFlavor + if isEnabled { + o.flavor = someFlavor + } else { + o.flavor = noneFlavor + // override o.value with zero value. + var zero T + o.value = zero + } + return nil } diff --git a/vendor/go.opentelemetry.io/collector/config/configretry/config.yaml b/vendor/go.opentelemetry.io/collector/config/configretry/config.yaml new file mode 100644 index 00000000000..f8a4bab979a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/configretry/config.yaml @@ -0,0 +1,31 @@ +$defs: + back_off_config: + description: BackOffConfig defines configuration for retrying batches in case of export failure. The current supported strategy is exponential backoff. + type: object + properties: + enabled: + description: Enabled indicates whether to retry sending batches in case of export failure. + type: boolean + initial_interval: + description: InitialInterval is the time to wait after the first failure before retrying. + type: string + x-customType: time.Duration + format: duration + max_elapsed_time: + description: MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch. Once this value is reached, the data is discarded. If set to 0, the retries are never stopped. + type: string + x-customType: time.Duration + format: duration + max_interval: + description: MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between consecutive retries will always be `MaxInterval`.
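The configoptional change above is user-visible: with the configoptional.AddEnabledField gate at Beta (enabled by default), an `enabled: false` key forces an optional section to None even when sibling keys are present. A hedged sketch of that behavior; the tlsish struct is illustrative only:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configoptional"
	"go.opentelemetry.io/collector/confmap"
)

// tlsish stands in for any optional config section; per the doc comment it
// must be a struct type without its own 'enabled' field.
type tlsish struct {
	CertFile string `mapstructure:"cert_file"`
}

func main() {
	var opt configoptional.Optional[tlsish]
	conf := confmap.NewFromStringMap(map[string]any{
		"enabled":   false, // explicitly disable the section
		"cert_file": "server.crt",
	})
	if err := opt.Unmarshal(conf); err != nil {
		panic(err)
	}
	fmt.Println(opt.HasValue()) // false: the Optional is forced to None
}
```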
diff --git a/vendor/go.opentelemetry.io/collector/config/configretry/config.yaml b/vendor/go.opentelemetry.io/collector/config/configretry/config.yaml
new file mode 100644
index 00000000000..f8a4bab979a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/config/configretry/config.yaml
@@ -0,0 +1,31 @@
+$defs:
+  back_off_config:
+    description: BackOffConfig defines configuration for retrying batches in case of export failure. The current supported strategy is exponential backoff.
+    type: object
+    properties:
+      enabled:
+        description: Enabled indicates whether to retry sending batches in case of export failure.
+        type: boolean
+      initial_interval:
+        description: InitialInterval is the time to wait after the first failure before retrying.
+        type: string
+        x-customType: time.Duration
+        format: duration
+      max_elapsed_time:
+        description: MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch. Once this value is reached, the data is discarded. If set to 0, the retries are never stopped.
+        type: string
+        x-customType: time.Duration
+        format: duration
+      max_interval:
+        description: MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between consecutive retries will always be `MaxInterval`.
+        type: string
+        x-customType: time.Duration
+        format: duration
+      multiplier:
+        description: Multiplier is the value multiplied by the backoff interval bounds.
+        type: number
+        x-customType: float64
+      randomization_factor:
+        description: 'RandomizationFactor is a random factor used to calculate next backoffs: Randomized interval = RetryInterval * (1 ± RandomizationFactor).'
+        type: number
+        x-customType: float64
diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/config.schema.yaml b/vendor/go.opentelemetry.io/collector/config/configtelemetry/config.schema.yaml
new file mode 100644
index 00000000000..674f389267e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/config/configtelemetry/config.schema.yaml
@@ -0,0 +1,5 @@
+$defs:
+  level:
+    description: Level is the level of internal telemetry (metrics, logs, traces about the component itself) that every component should generate.
+    type: integer
+    x-customType: int32
diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/README.md b/vendor/go.opentelemetry.io/collector/config/configtls/README.md
index 79231223356..2d66fae3cdb 100644
--- a/vendor/go.opentelemetry.io/collector/config/configtls/README.md
+++ b/vendor/go.opentelemetry.io/collector/config/configtls/README.md
@@ -99,7 +99,7 @@ Example:
 
 ```yaml
 exporters:
-  otlp:
+  otlp_grpc:
     endpoint: myserver.local:55690
     tls:
       insecure: false
@@ -174,7 +174,7 @@ Example:
 
 ```yaml
 exporters:
-  otlp:
+  otlp_grpc:
     endpoint: myserver.local:55690
     tls:
       ca_file: ca.crt
diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/config.yaml b/vendor/go.opentelemetry.io/collector/config/configtls/config.yaml
new file mode 100644
index 00000000000..40d997f76f2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/config/configtls/config.yaml
@@ -0,0 +1,90 @@
+$defs:
+  client_config:
+    description: ClientConfig contains TLS configurations that are specific to client connections in addition to the common configurations. This should be used by components configuring TLS client connections.
+    type: object
+    properties:
+      insecure:
+        description: In gRPC and HTTP when set to true, this is used to disable the client transport security. See https://godoc.org/google.golang.org/grpc#WithInsecure for gRPC. Please refer to https://godoc.org/crypto/tls#Config for more information. (optional, default false)
+        type: boolean
+      insecure_skip_verify:
+        description: InsecureSkipVerify will enable TLS but not verify the certificate.
+        type: boolean
+      server_name_override:
+        description: ServerName requested by client for virtual hosting. This sets the ServerName in the TLSConfig. Please refer to https://godoc.org/crypto/tls#Config for more information. (optional)
+        type: string
+    allOf:
+      - $ref: config
+  config:
+    description: 'Config exposes the common client and server TLS configurations. Note: since there isn''t anything specific to a server connection, components with server connections should use ServerConfig.'
+    type: object
+    properties:
+      ca_file:
+        description: Path to the CA cert. For a client this verifies the server certificate. For a server this verifies client certificates. If empty uses system root CA. (optional)
+        type: string
+      ca_pem:
+        description: In memory PEM encoded cert. (optional)
+        $ref: go.opentelemetry.io/collector/config/configopaque.string
+      cert_file:
+        description: Path to the TLS cert to use for TLS required connections. (optional)
+        type: string
+      cert_pem:
+        description: In memory PEM encoded TLS cert to use for TLS required connections. (optional)
+        $ref: go.opentelemetry.io/collector/config/configopaque.string
+      cipher_suites:
+        description: CipherSuites is a list of TLS cipher suites that the TLS transport can use. If left blank, a safe default list is used. See https://go.dev/src/crypto/tls/cipher_suites.go for a list of supported cipher suites.
+        type: array
+        items:
+          type: string
+      curve_preferences:
+        description: CurvePreferences contains the elliptic curves that will be used in an ECDHE handshake, in preference order. Defaults to an empty list, in which case the "crypto/tls" defaults are used internally.
+        type: array
+        items:
+          type: string
+      include_system_ca_certs_pool:
+        description: If true, load system CA certificates pool in addition to the certificates configured in this struct.
+        type: boolean
+      key_file:
+        description: Path to the TLS key to use for TLS required connections. (optional)
+        type: string
+      key_pem:
+        description: In memory PEM encoded TLS key to use for TLS required connections. (optional)
+        $ref: go.opentelemetry.io/collector/config/configopaque.string
+      max_version:
+        description: MaxVersion sets the maximum TLS version that is acceptable. If not set, refer to crypto/tls for defaults. (optional)
+        type: string
+      min_version:
+        description: MinVersion sets the minimum TLS version that is acceptable. If not set, TLS 1.2 will be used. (optional)
+        type: string
+      reload_interval:
+        description: ReloadInterval specifies the duration after which the certificate will be reloaded. If not set, it will never be reloaded. (optional)
+        type: string
+        x-customType: time.Duration
+        format: duration
+      tpm:
+        description: Trusted platform module configuration
+        $ref: tpm_config
+  server_config:
+    description: ServerConfig contains TLS configurations that are specific to server connections in addition to the common configurations. This should be used by components configuring TLS server connections.
+    type: object
+    properties:
+      client_ca_file:
+        description: Path to the TLS cert to use by the server to verify a client certificate. This sets the ClientCAs and ClientAuth to RequireAndVerifyClientCert in the TLSConfig. Please refer to https://godoc.org/crypto/tls#Config for more information. (optional)
+        type: string
+      client_ca_file_reload:
+        description: Reload the ClientCAs file when it is modified (optional, default false)
+        type: boolean
+    allOf:
+      - $ref: config
+  tpm_config:
+    description: TPMConfig defines trusted platform module configuration for storing TLS keys.
+    type: object
+    properties:
+      auth:
+        type: string
+      enabled:
+        type: boolean
+      owner_auth:
+        type: string
+      path:
+        description: The path to the TPM device or Unix domain socket. For instance /dev/tpm0 or /dev/tpmrm0.
+        type: string
diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go b/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go
index 9bed894e292..24834915eed 100644
--- a/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go
+++ b/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go
@@ -9,8 +9,10 @@ import (
 	"crypto/x509"
 	"errors"
 	"fmt"
+	"maps"
 	"os"
 	"path/filepath"
+	"slices"
 	"sync"
 	"time"
 
@@ -268,15 +270,24 @@ func (c Config) loadTLSConfig() (*tls.Config, error) {
 	if err != nil {
 		return nil, err
 	}
+
+	allowedCurves := slices.Collect(maps.Values(tlsCurveTypes))
 	curvePreferences := make([]tls.CurveID, 0, len(c.CurvePreferences))
 	for _, curve := range c.CurvePreferences {
 		curveID, ok := tlsCurveTypes[curve]
 		if !ok {
-			return nil, fmt.Errorf("invalid curve type: %s. Expected values are [P-256, P-384, P-521, X25519, X25519MLKEM768]", curve)
+			return nil, fmt.Errorf("invalid curve type: %s. Expected values are %s", curve, allowedCurves)
 		}
 		curvePreferences = append(curvePreferences, curveID)
 	}
 
+	// If no curve preferences were explicitly specified in the configuration, use
+	// the ones we allow. This helps in particular with FIPS builds where not all curves
+	// are allowed.
+	if len(curvePreferences) == 0 {
+		curvePreferences = allowedCurves
+	}
+
 	return &tls.Config{
 		RootCAs:        certPool,
 		GetCertificate: getCertificate,
@@ -501,11 +512,3 @@ var tlsVersions = map[string]uint16{
 	"1.2": tls.VersionTLS12,
 	"1.3": tls.VersionTLS13,
 }
-
-var tlsCurveTypes = map[string]tls.CurveID{
-	"P256":           tls.CurveP256,
-	"P384":           tls.CurveP384,
-	"P521":           tls.CurveP521,
-	"X25519":         tls.X25519,
-	"X25519MLKEM768": tls.X25519MLKEM768,
-}
diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/curves_fips.go b/vendor/go.opentelemetry.io/collector/config/configtls/curves_fips.go
new file mode 100644
index 00000000000..877c32ce627
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/config/configtls/curves_fips.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build requirefips
+
+package configtls // import "go.opentelemetry.io/collector/config/configtls"
+
+import "crypto/tls"
+
+var tlsCurveTypes = map[string]tls.CurveID{
+	"P256": tls.CurveP256,
+	"P384": tls.CurveP384,
+	"P521": tls.CurveP521,
+
+	// The following X25519 curves are not available in FIPS mode, so we remove them from the map.
+	// See also https://cs.opensource.google/go/go/+/refs/tags/go1.24.6:src/crypto/ecdh/x25519.go
+	//"X25519": tls.X25519,
+	//"X25519MLKEM768": tls.X25519MLKEM768,
+}
diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/curves_nofips.go b/vendor/go.opentelemetry.io/collector/config/configtls/curves_nofips.go
new file mode 100644
index 00000000000..f412881b644
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/config/configtls/curves_nofips.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !requirefips
+
+package configtls // import "go.opentelemetry.io/collector/config/configtls"
+
+import "crypto/tls"
+
+var tlsCurveTypes = map[string]tls.CurveID{
+	"P256":           tls.CurveP256,
+	"P384":           tls.CurveP384,
+	"P521":           tls.CurveP521,
+	"X25519":         tls.X25519,
+	"X25519MLKEM768": tls.X25519MLKEM768,
+}
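A self-contained sketch of the curve-resolution behavior introduced above: unknown names are rejected along with the allowed set, and an empty preference list falls back to every curve the build permits, which is what makes the `requirefips` build-tag split effective. The `resolveCurves` function and its names are illustrative, not the vendored internals:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"maps"
	"slices"
)

// resolveCurves mirrors the rules above: reject unknown curve names with the
// allowed set, and fall back to every allowed curve when none is configured.
func resolveCurves(prefs []string, allowed map[string]tls.CurveID) ([]tls.CurveID, error) {
	out := make([]tls.CurveID, 0, len(prefs))
	for _, name := range prefs {
		id, ok := allowed[name]
		if !ok {
			return nil, fmt.Errorf("invalid curve type: %s. Expected values are %v", name, slices.Sorted(maps.Keys(allowed)))
		}
		out = append(out, id)
	}
	if len(out) == 0 {
		// No explicit preference: use everything this build permits, which is
		// the full set here and only the P-curves under the requirefips tag.
		out = slices.Collect(maps.Values(allowed))
	}
	return out, nil
}

func main() {
	curves, err := resolveCurves(nil, map[string]tls.CurveID{"P256": tls.CurveP256, "X25519": tls.X25519})
	fmt.Println(curves, err)
}
```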
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/decoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/decoder.go
index 17f172a2ec3..e7a486c47f1 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/internal/decoder.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/decoder.go
@@ -7,7 +7,6 @@ import (
 	"encoding"
 	"errors"
 	"fmt"
-	"maps"
 	"reflect"
 	"slices"
 	"strings"
@@ -32,6 +31,18 @@ func WithIgnoreUnused() UnmarshalOption {
 	})
 }
 
+// WithForceUnmarshaler sets an option to run a top-level Unmarshal method,
+// even if the Conf being unmarshaled is already a parameter from an Unmarshal method.
+// To avoid infinite recursion, this should only be used when unmarshaling into
+// a different type from the current Unmarshaler.
+// For instance, this should be used in wrapper types such as configoptional.Optional
+// to ensure the inner type's Unmarshal method is called.
+func WithForceUnmarshaler() UnmarshalOption {
+	return UnmarshalOptionFunc(func(uo *UnmarshalOptions) {
+		uo.ForceUnmarshaler = true
+	})
+}
+
 // Decode decodes the contents of the Conf into the result argument, using a
 // mapstructure decoder with the following notable behaviors. Ensures that maps whose
 // values are nil pointer structs resolved to the zero value of the target struct (see
@@ -54,10 +65,10 @@ func Decode(input, result any, settings UnmarshalOptions, skipTopLevelUnmarshale
 			mapKeyStringToMapKeyTextUnmarshalerHookFunc(),
 			mapstructure.StringToTimeDurationHookFunc(),
 			mapstructure.TextUnmarshallerHookFunc(),
-			unmarshalerHookFunc(result, skipTopLevelUnmarshaler),
+			unmarshalerHookFunc(result, skipTopLevelUnmarshaler && !settings.ForceUnmarshaler),
 			// after the main unmarshaler hook is called,
 			// we unmarshal the embedded structs if present to merge with the result:
-			unmarshalerEmbeddedStructsHookFunc(),
+			unmarshalerEmbeddedStructsHookFunc(settings),
 			zeroSliceAndMapHookFunc(),
 		),
 	}
@@ -94,14 +105,17 @@ func useExpandValue() mapstructure.DecodeHookFuncType {
 			return v, nil
 		}
 
-		switch to.Kind() {
-		case reflect.Array, reflect.Slice, reflect.Map:
-			if isStringyStructure(to) {
-				// If the target field is a stringy structure, sanitize to use the original string value everywhere.
-				return sanitizeToStr(data), nil
+		if !NewExpandedValueSanitizer.IsEnabled() {
+			switch to.Kind() {
+			case reflect.Array, reflect.Slice, reflect.Map:
+				if isStringyStructure(to) {
+					// If the target field is a stringy structure, sanitize to use the original string value everywhere.
+					return sanitizeToStr(data), nil
+				}
+
+				// Otherwise, sanitize to use the parsed value everywhere.
+				return sanitize(data), nil
 			}
-			// Otherwise, sanitize to use the parsed value everywhere.
-			return sanitize(data), nil
 		}
 		return data, nil
 	}
@@ -168,7 +182,7 @@ func mapKeyStringToMapKeyTextUnmarshalerHookFunc() mapstructure.DecodeHookFuncTy
 	}
 
 	// Create a map with key value of to's key to bool.
-	fieldNameSet := reflect.MakeMap(reflect.MapOf(to.Key(), reflect.TypeOf(true)))
+	fieldNameSet := reflect.MakeMap(reflect.MapOf(to.Key(), reflect.TypeFor[bool]()))
 	for k := range data.(map[string]any) {
 		// Create a new value of the to's key type.
 		tKey := reflect.New(to.Key())
@@ -189,7 +203,7 @@ func mapKeyStringToMapKeyTextUnmarshalerHookFunc() mapstructure.DecodeHookFuncTy
 
 // unmarshalerEmbeddedStructsHookFunc provides a mechanism for embedded structs to define their own unmarshal logic,
 // by implementing the Unmarshaler interface.
-func unmarshalerEmbeddedStructsHookFunc() mapstructure.DecodeHookFuncValue {
+func unmarshalerEmbeddedStructsHookFunc(settings UnmarshalOptions) mapstructure.DecodeHookFuncValue {
 	return safeWrapDecodeHookFunc(func(from, to reflect.Value) (any, error) {
 		if to.Type().Kind() != reflect.Struct {
 			return from.Interface(), nil
@@ -198,32 +212,72 @@ func unmarshalerEmbeddedStructsHookFunc() mapstructure.DecodeHookFuncValue {
 		if !ok {
 			return from.Interface(), nil
 		}
+
+		// First call Unmarshaler on squashed embedded fields, if necessary.
+		var squashedUnmarshalers []int
 		for i := 0; i < to.Type().NumField(); i++ {
-			// embedded structs passed in via `squash` cannot be pointers.
We just check if they are structs: f := to.Type().Field(i) - if f.IsExported() && slices.Contains(strings.Split(f.Tag.Get(MapstructureTag), ","), "squash") { - if unmarshaler, ok := to.Field(i).Addr().Interface().(Unmarshaler); ok { - c := NewFromStringMap(fromAsMap) - c.skipTopLevelUnmarshaler = true - if err := unmarshaler.Unmarshal(c); err != nil { - return nil, err - } - // the struct we receive from this unmarshaling only contains fields related to the embedded struct. - // we merge this partially unmarshaled struct with the rest of the result. - // note we already unmarshaled the main struct earlier, and therefore merge with it. - conf := New() - if err := conf.Marshal(unmarshaler); err != nil { - return nil, err - } - resultMap := conf.ToStringMap() - if fromAsMap == nil && len(resultMap) > 0 { - fromAsMap = make(map[string]any, len(resultMap)) - } - maps.Copy(fromAsMap, resultMap) - } + if !f.IsExported() { + continue + } + tagParts := strings.Split(f.Tag.Get(MapstructureTag), ",") + if !slices.Contains(tagParts[1:], "squash") { + continue } + unmarshaler, ok := to.Field(i).Addr().Interface().(Unmarshaler) + if !ok { + continue + } + c := NewFromStringMap(fromAsMap) + c.skipTopLevelUnmarshaler = true + if err := unmarshaler.Unmarshal(c); err != nil { + return nil, err + } + squashedUnmarshalers = append(squashedUnmarshalers, i) } - return fromAsMap, nil + + // No squashed unmarshalers, we can let mapstructure do its job. + if len(squashedUnmarshalers) == 0 { + return fromAsMap, nil + } + + // We need to unmarshal into all other fields without overwriting the output of the Unmarshal calls. + // To do that, create a custom "partial" struct containing only the non-squashed fields. + var fields []reflect.StructField + var fieldValues []reflect.Value + for i := 0; i < to.Type().NumField(); i++ { + f := to.Type().Field(i) + if !f.IsExported() { + continue + } + if slices.Contains(squashedUnmarshalers, i) { + continue + } + fields = append(fields, f) + fieldValues = append(fieldValues, to.Field(i)) + } + restType := reflect.StructOf(fields) + restValue := reflect.New(restType) + + // Copy initial values into partial struct. + for i, fieldValue := range fieldValues { + restValue.Elem().Field(i).Set(fieldValue) + } + + // Decode into the partial struct. + // This performs a recursive call into this hook, which will be handled by the "no squashed unmarshalers" case above. + // We need to set `IgnoreUnused` to avoid errors from the map containing fields only present in the full struct. + settings.IgnoreUnused = true + if err := Decode(fromAsMap, restValue.Interface(), settings, true); err != nil { + return nil, err + } + + // Copy decoding results back to the original struct. + for i, fieldValue := range fieldValues { + fieldValue.Set(restValue.Elem().Field(i)) + } + + return to, nil }) } diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/featuregates.go b/vendor/go.opentelemetry.io/collector/confmap/internal/featuregates.go index 47f9348dc42..2a186f55d12 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/featuregates.go +++ b/vendor/go.opentelemetry.io/collector/confmap/internal/featuregates.go @@ -12,3 +12,11 @@ var EnableMergeAppendOption = featuregate.GlobalRegistry().MustRegister( featuregate.WithRegisterDescription("Combines lists when resolving configs from different sources. 
This feature gate will not be stabilized 'as is'; the current behavior will remain the default."), featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/8754"), ) + +var NewExpandedValueSanitizer = featuregate.GlobalRegistry().MustRegister( + "confmap.newExpandedValueSanitizer", + featuregate.StageBeta, + featuregate.WithRegisterFromVersion("v0.144.0"), + featuregate.WithRegisterDescription("Fixes some types of decoding errors where environment variables are parsed as non-string types but assigned to string fields."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/pull/14413"), +) diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go index d8208747d37..0a10560ffa6 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go +++ b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go @@ -12,7 +12,7 @@ import ( "strings" "github.com/go-viper/mapstructure/v2" - yaml "go.yaml.in/yaml/v3" + "go.yaml.in/yaml/v3" ) const ( diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/unmarshaloption.go b/vendor/go.opentelemetry.io/collector/confmap/internal/unmarshaloption.go index e82ce6fd0cc..77e38417ea8 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/unmarshaloption.go +++ b/vendor/go.opentelemetry.io/collector/confmap/internal/unmarshaloption.go @@ -10,7 +10,8 @@ type UnmarshalOption interface { // UnmarshalOptions is used by (*Conf).Unmarshal to toggle unmarshaling settings. // It is in the `internal` package so experimental options can be added in xconfmap. type UnmarshalOptions struct { - IgnoreUnused bool + IgnoreUnused bool + ForceUnmarshaler bool } type UnmarshalOptionFunc func(*UnmarshalOptions) diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider.go index ab3e9a18a5b..cf22f53a316 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/provider.go +++ b/vendor/go.opentelemetry.io/collector/confmap/provider.go @@ -9,7 +9,7 @@ import ( "time" "go.uber.org/zap" - yaml "go.yaml.in/yaml/v3" + "go.yaml.in/yaml/v3" ) // ProviderSettings are the settings to initialize a Provider. diff --git a/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go index ba90b54bc28..28b40cf8b1f 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go +++ b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go @@ -11,11 +11,12 @@ import ( "strings" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/confmap/internal" ) // As interface types are only used for static typing, a common idiom to find the reflection Type // for an interface type Foo is to use a *Foo value. -var configValidatorType = reflect.TypeOf((*Validator)(nil)).Elem() +var configValidatorType = reflect.TypeFor[Validator]() // Validator defines an optional interface for configurations to implement to do validation. type Validator interface { @@ -194,3 +195,13 @@ func stringifyMapKey(val reflect.Value) string { } } } + +// WithForceUnmarshaler sets an option to run a top-level Unmarshal method, +// even if the Conf being unmarshaled is already a parameter from an Unmarshal method. 
+// To avoid infinite recursion, this should only be used when unmarshaling into +// a different type from the current Unmarshaler. +// For instance, this should be used in wrapper types such as configoptional.Optional +// to ensure the inner type's Unmarshal method is called. +func WithForceUnmarshaler() confmap.UnmarshalOption { + return internal.WithForceUnmarshaler() +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/xconfmap/confmap.go b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/confmap.go new file mode 100644 index 00000000000..ab061c8d7fd --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/confmap.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconfmap // import "go.opentelemetry.io/collector/confmap/xconfmap" + +import ( + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/confmap/internal" +) + +// ExpandedValue represents a configuration value that has been expanded from a template +// (e.g., environment variable substitution). It contains both the parsed value and the +// original string representation. +// +// This type is exposed to allow working with configuration values returned by ToStringMapRaw. +type ExpandedValue = internal.ExpandedValue + +// ToStringMapRaw returns the raw configuration map without sanitization. +// This is an experimental API and may change or be removed in future versions. +// The returned map may change at any time without prior notice. +// +// Unlike confmap.Conf.ToStringMap(), this function does not sanitize the map +// by removing expandedValue references. This allows for configmap manipulation +// without destroying internal types. +func ToStringMapRaw(conf *confmap.Conf) map[string]any { + return internal.ToStringMapRaw(conf) +} diff --git a/vendor/go.opentelemetry.io/collector/connector/connector.go b/vendor/go.opentelemetry.io/collector/connector/connector.go index e7c11d95bd5..4689519644f 100644 --- a/vendor/go.opentelemetry.io/collector/connector/connector.go +++ b/vendor/go.opentelemetry.io/collector/connector/connector.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/internal/componentalias" "go.opentelemetry.io/collector/pipeline" ) @@ -234,6 +235,7 @@ func WithLogsToLogs(createLogsToLogs CreateLogsToLogsFunc, sl component.Stabilit type factory struct { cfgType component.Type component.CreateDefaultConfigFunc + componentalias.TypeAliasHolder createTracesToTracesFunc CreateTracesToTracesFunc createTracesToMetricsFunc CreateTracesToMetricsFunc @@ -308,8 +310,8 @@ func (f *factory) CreateTracesToTraces(ctx context.Context, set Settings, cfg co return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalTraces) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createTracesToTracesFunc(ctx, set, cfg, next) @@ -320,8 +322,8 @@ func (f *factory) CreateTracesToMetrics(ctx context.Context, set Settings, cfg c return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalMetrics) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createTracesToMetricsFunc(ctx, 
set, cfg, next) @@ -332,8 +334,8 @@ func (f *factory) CreateTracesToLogs(ctx context.Context, set Settings, cfg comp return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalLogs) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createTracesToLogsFunc(ctx, set, cfg, next) @@ -344,8 +346,8 @@ func (f *factory) CreateMetricsToTraces(ctx context.Context, set Settings, cfg c return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalTraces) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createMetricsToTracesFunc(ctx, set, cfg, next) @@ -356,8 +358,8 @@ func (f *factory) CreateMetricsToMetrics(ctx context.Context, set Settings, cfg return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalMetrics) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createMetricsToMetricsFunc(ctx, set, cfg, next) @@ -368,8 +370,8 @@ func (f *factory) CreateMetricsToLogs(ctx context.Context, set Settings, cfg com return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalLogs) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createMetricsToLogsFunc(ctx, set, cfg, next) @@ -380,8 +382,8 @@ func (f *factory) CreateLogsToTraces(ctx context.Context, set Settings, cfg comp return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalTraces) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createLogsToTracesFunc(ctx, set, cfg, next) @@ -392,8 +394,8 @@ func (f *factory) CreateLogsToMetrics(ctx context.Context, set Settings, cfg com return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalMetrics) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createLogsToMetricsFunc(ctx, set, cfg, next) @@ -404,8 +406,8 @@ func (f *factory) CreateLogsToLogs(ctx context.Context, set Settings, cfg compon return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalLogs) } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createLogsToLogsFunc(ctx, set, cfg, next) @@ -416,6 +418,7 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa f := &factory{ cfgType: cfgType, CreateDefaultConfigFunc: createDefaultConfig, + TypeAliasHolder: componentalias.NewTypeAliasHolder(), } for _, opt := range options { opt.apply(f) diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go b/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go index 3a516ad918d..3f874d0838a 100644 --- a/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go +++ 
b/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/internal/componentalias" "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/pipeline/xpipeline" ) @@ -78,88 +79,82 @@ type CreateProfilesToLogsFunc func(context.Context, connector.Settings, componen // FactoryOption apply changes to ReceiverOptions. type FactoryOption interface { // applyOption applies the option. - applyOption(o *factoryOpts) + applyOption(o *factory) } // factoryOptionFunc is an ReceiverFactoryOption created through a function. -type factoryOptionFunc func(*factoryOpts) +type factoryOptionFunc func(*factory) -func (f factoryOptionFunc) applyOption(o *factoryOpts) { +func (f factoryOptionFunc) applyOption(o *factory) { f(o) } -type factoryOpts struct { - opts []connector.FactoryOption - - *factory -} - // WithTracesToTraces overrides the default "error not supported" implementation for WithTracesToTraces and the default "undefined" stability level. func WithTracesToTraces(createTracesToTraces connector.CreateTracesToTracesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithTracesToTraces(createTracesToTraces, sl)) }) } // WithTracesToMetrics overrides the default "error not supported" implementation for WithTracesToMetrics and the default "undefined" stability level. func WithTracesToMetrics(createTracesToMetrics connector.CreateTracesToMetricsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithTracesToMetrics(createTracesToMetrics, sl)) }) } // WithTracesToLogs overrides the default "error not supported" implementation for WithTracesToLogs and the default "undefined" stability level. func WithTracesToLogs(createTracesToLogs connector.CreateTracesToLogsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithTracesToLogs(createTracesToLogs, sl)) }) } // WithMetricsToTraces overrides the default "error not supported" implementation for WithMetricsToTraces and the default "undefined" stability level. func WithMetricsToTraces(createMetricsToTraces connector.CreateMetricsToTracesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithMetricsToTraces(createMetricsToTraces, sl)) }) } // WithMetricsToMetrics overrides the default "error not supported" implementation for WithMetricsToMetrics and the default "undefined" stability level. func WithMetricsToMetrics(createMetricsToMetrics connector.CreateMetricsToMetricsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithMetricsToMetrics(createMetricsToMetrics, sl)) }) } // WithMetricsToLogs overrides the default "error not supported" implementation for WithMetricsToLogs and the default "undefined" stability level. 
func WithMetricsToLogs(createMetricsToLogs connector.CreateMetricsToLogsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithMetricsToLogs(createMetricsToLogs, sl)) }) } // WithLogsToTraces overrides the default "error not supported" implementation for WithLogsToTraces and the default "undefined" stability level. func WithLogsToTraces(createLogsToTraces connector.CreateLogsToTracesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithLogsToTraces(createLogsToTraces, sl)) }) } // WithLogsToMetrics overrides the default "error not supported" implementation for WithLogsToMetrics and the default "undefined" stability level. func WithLogsToMetrics(createLogsToMetrics connector.CreateLogsToMetricsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithLogsToMetrics(createLogsToMetrics, sl)) }) } // WithLogsToLogs overrides the default "error not supported" implementation for WithLogsToLogs and the default "undefined" stability level. func WithLogsToLogs(createLogsToLogs connector.CreateLogsToLogsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, connector.WithLogsToLogs(createLogsToLogs, sl)) }) } // WithTracesToProfiles overrides the default "error not supported" implementation for WithTracesToProfiles and the default "undefined" stability level. func WithTracesToProfiles(createTracesToProfiles CreateTracesToProfilesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.tracesToProfilesStabilityLevel = sl o.createTracesToProfilesFunc = createTracesToProfiles }) @@ -167,7 +162,7 @@ func WithTracesToProfiles(createTracesToProfiles CreateTracesToProfilesFunc, sl // WithMetricsToProfiles overrides the default "error not supported" implementation for WithMetricsToProfiles and the default "undefined" stability level. func WithMetricsToProfiles(createMetricsToProfiles CreateMetricsToProfilesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.metricsToProfilesStabilityLevel = sl o.createMetricsToProfilesFunc = createMetricsToProfiles }) @@ -175,7 +170,7 @@ func WithMetricsToProfiles(createMetricsToProfiles CreateMetricsToProfilesFunc, // WithLogsToProfiles overrides the default "error not supported" implementation for WithLogsToProfiles and the default "undefined" stability level. func WithLogsToProfiles(createLogsToProfiles CreateLogsToProfilesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.logsToProfilesStabilityLevel = sl o.createLogsToProfilesFunc = createLogsToProfiles }) @@ -183,7 +178,7 @@ func WithLogsToProfiles(createLogsToProfiles CreateLogsToProfilesFunc, sl compon // WithProfilesToProfiles overrides the default "error not supported" implementation for WithProfilesToProfiles and the default "undefined" stability level. 
func WithProfilesToProfiles(createProfilesToProfiles CreateProfilesToProfilesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.profilesToProfilesStabilityLevel = sl o.createProfilesToProfilesFunc = createProfilesToProfiles }) @@ -191,7 +186,7 @@ func WithProfilesToProfiles(createProfilesToProfiles CreateProfilesToProfilesFun // WithProfilesToTraces overrides the default "error not supported" implementation for WithProfilesToTraces and the default "undefined" stability level. func WithProfilesToTraces(createProfilesToTraces CreateProfilesToTracesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.profilesToTracesStabilityLevel = sl o.createProfilesToTracesFunc = createProfilesToTraces }) @@ -199,7 +194,7 @@ func WithProfilesToTraces(createProfilesToTraces CreateProfilesToTracesFunc, sl // WithProfilesToMetrics overrides the default "error not supported" implementation for WithProfilesToMetrics and the default "undefined" stability level. func WithProfilesToMetrics(createProfilesToMetrics CreateProfilesToMetricsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.profilesToMetricsStabilityLevel = sl o.createProfilesToMetricsFunc = createProfilesToMetrics }) @@ -207,15 +202,25 @@ func WithProfilesToMetrics(createProfilesToMetrics CreateProfilesToMetricsFunc, // WithProfilesToLogs overrides the default "error not supported" implementation for WithProfilesToLogs and the default "undefined" stability level. func WithProfilesToLogs(createProfilesToLogs CreateProfilesToLogsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.profilesToLogsStabilityLevel = sl o.createProfilesToLogsFunc = createProfilesToLogs }) } +// WithDeprecatedTypeAlias configures a deprecated type alias for the connector. Only one alias is supported per connector. +// When the alias is used in configuration, a deprecation warning is automatically logged. +func WithDeprecatedTypeAlias(alias component.Type) FactoryOption { + return factoryOptionFunc(func(o *factory) { + o.SetDeprecatedAlias(alias) + }) +} + // factory implements the Factory interface. 
type factory struct { connector.Factory + componentalias.TypeAliasHolder + opts []connector.FactoryOption createTracesToProfilesFunc CreateTracesToProfilesFunc createMetricsToProfilesFunc CreateMetricsToProfilesFunc @@ -268,6 +273,9 @@ func (f *factory) CreateTracesToProfiles(ctx context.Context, set connector.Sett if f.createTracesToProfilesFunc == nil { return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, xpipeline.SignalProfiles) } + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err + } return f.createTracesToProfilesFunc(ctx, set, cfg, next) } @@ -275,6 +283,9 @@ func (f *factory) CreateMetricsToProfiles(ctx context.Context, set connector.Set if f.createMetricsToProfilesFunc == nil { return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, xpipeline.SignalProfiles) } + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err + } return f.createMetricsToProfilesFunc(ctx, set, cfg, next) } @@ -282,6 +293,9 @@ func (f *factory) CreateLogsToProfiles(ctx context.Context, set connector.Settin if f.createLogsToProfilesFunc == nil { return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, xpipeline.SignalProfiles) } + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err + } return f.createLogsToProfilesFunc(ctx, set, cfg, next) } @@ -289,6 +303,9 @@ func (f *factory) CreateProfilesToProfiles(ctx context.Context, set connector.Se if f.createProfilesToProfilesFunc == nil { return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, xpipeline.SignalProfiles) } + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err + } return f.createProfilesToProfilesFunc(ctx, set, cfg, next) } @@ -296,6 +313,9 @@ func (f *factory) CreateProfilesToTraces(ctx context.Context, set connector.Sett if f.createProfilesToTracesFunc == nil { return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalTraces) } + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err + } return f.createProfilesToTracesFunc(ctx, set, cfg, next) } @@ -303,6 +323,9 @@ func (f *factory) CreateProfilesToMetrics(ctx context.Context, set connector.Set if f.createProfilesToMetricsFunc == nil { return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalMetrics) } + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err + } return f.createProfilesToMetricsFunc(ctx, set, cfg, next) } @@ -310,15 +333,19 @@ func (f *factory) CreateProfilesToLogs(ctx context.Context, set connector.Settin if f.createProfilesToLogsFunc == nil { return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalLogs) } + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err + } return f.createProfilesToLogsFunc(ctx, set, cfg, next) } -// NewFactory returns a Factory. +// NewFactory creates a wrapped connector.Factory with experimental capabilities func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { - opts := factoryOpts{factory: &factory{}} + f := &factory{TypeAliasHolder: componentalias.NewTypeAliasHolder()} for _, opt := range options { - opt.applyOption(&opts) + opt.applyOption(f) } - opts.Factory = connector.NewFactory(cfgType, createDefaultConfig, opts.opts...) 
- return opts.factory + f.Factory = connector.NewFactory(cfgType, createDefaultConfig, f.opts...) + f.Factory.(componentalias.TypeAliasHolder).SetDeprecatedAlias(f.DeprecatedAlias()) + return f } diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/metadata.yaml b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/metadata.yaml new file mode 100644 index 00000000000..cd0eba7e107 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/metadata.yaml @@ -0,0 +1,8 @@ +type: consumererror/xconsumererror +github_project: open-telemetry/opentelemetry-collector + +status: + disable_codecov_badge: true + class: consumer + stability: + development: [profiles] diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/metadata.yaml b/vendor/go.opentelemetry.io/collector/consumer/consumertest/metadata.yaml new file mode 100644 index 00000000000..944bc160ffa --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/metadata.yaml @@ -0,0 +1,6 @@ +type: consumer/consumertest +github_project: open-telemetry/opentelemetry-collector + +status: + disable_codecov_badge: true + class: pkg diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go index ec5cca1efb5..3c83ee01614 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go @@ -201,10 +201,11 @@ func (sle *LogsSink) Contexts() []context.Context { // stores all profiles and allows querying them for testing. type ProfilesSink struct { nonMutatingConsumer - mu sync.Mutex - profiles []pprofile.Profiles - contexts []context.Context - sampleCount int + mu sync.Mutex + profiles []pprofile.Profiles + contexts []context.Context + sampleCount int + profileCount int } var _ xconsumer.Profiles = (*ProfilesSink)(nil) @@ -217,6 +218,7 @@ func (ste *ProfilesSink) ConsumeProfiles(ctx context.Context, td pprofile.Profil ste.profiles = append(ste.profiles, td) ste.contexts = append(ste.contexts, ctx) ste.sampleCount += td.SampleCount() + ste.profileCount += td.ProfileCount() return nil } @@ -231,13 +233,20 @@ func (ste *ProfilesSink) AllProfiles() []pprofile.Profiles { return copyProfiles } -// SampleCount returns the number of profiles stored by this sink since last Reset. +// SampleCount returns the number of samples stored by this sink since last Reset. func (ste *ProfilesSink) SampleCount() int { ste.mu.Lock() defer ste.mu.Unlock() return ste.sampleCount } +// ProfileCount returns the number of profiles stored by this sink since last Reset. +func (ste *ProfilesSink) ProfileCount() int { + ste.mu.Lock() + defer ste.mu.Unlock() + return ste.profileCount +} + // Reset deletes any stored data. func (ste *ProfilesSink) Reset() { ste.mu.Lock() @@ -246,6 +255,7 @@ func (ste *ProfilesSink) Reset() { ste.profiles = nil ste.contexts = nil ste.sampleCount = 0 + ste.profileCount = 0 } // Contexts returns the contexts stored by this sink since last Reset. 
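The new `profileCount` bookkeeping in the sink above mirrors the existing sample counter. A hedged test sketch, assuming the collector's `consumertest` and `pdata/pprofile` packages as used in this diff (the test name and structure are illustrative):

```go
package consumertest_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pdata/pprofile"
)

func TestProfilesSinkCounts(t *testing.T) {
	sink := new(consumertest.ProfilesSink)

	// One resource -> one scope -> one (empty) profile, no samples.
	td := pprofile.NewProfiles()
	td.ResourceProfiles().AppendEmpty().
		ScopeProfiles().AppendEmpty().
		Profiles().AppendEmpty()

	require.NoError(t, sink.ConsumeProfiles(context.Background(), td))
	require.Equal(t, 1, sink.ProfileCount()) // counts profiles, new in this change
	require.Equal(t, 0, sink.SampleCount())  // counts samples inside those profiles

	// Reset clears both counters along with the stored data.
	sink.Reset()
	require.Zero(t, sink.ProfileCount())
}
```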
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporter.go
index 2df5ab6853b..7bb908c8200 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporter.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporter.go
@@ -8,7 +8,7 @@ import (
 
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/consumer"
-	"go.opentelemetry.io/collector/exporter/internal/experr"
+	"go.opentelemetry.io/collector/internal/componentalias"
 	"go.opentelemetry.io/collector/pipeline"
 )
 
@@ -105,6 +105,7 @@ type CreateLogsFunc func(context.Context, Settings, component.Config) (Logs, err
 type factory struct {
 	cfgType component.Type
 	component.CreateDefaultConfigFunc
+	componentalias.TypeAliasHolder
 	createTracesFunc     CreateTracesFunc
 	tracesStabilityLevel component.StabilityLevel
 	createMetricsFunc    CreateMetricsFunc
@@ -136,8 +137,8 @@ func (f *factory) CreateTraces(ctx context.Context, set Settings, cfg component.
 		return nil, pipeline.ErrSignalNotSupported
 	}
 
-	if set.ID.Type() != f.Type() {
-		return nil, experr.ErrIDMismatch(set.ID, f.Type())
+	if err := componentalias.ValidateComponentType(f, set.ID); err != nil {
+		return nil, err
 	}
 
 	return f.createTracesFunc(ctx, set, cfg)
@@ -148,8 +149,8 @@ func (f *factory) CreateMetrics(ctx context.Context, set Settings, cfg component
 		return nil, pipeline.ErrSignalNotSupported
 	}
 
-	if set.ID.Type() != f.Type() {
-		return nil, experr.ErrIDMismatch(set.ID, f.Type())
+	if err := componentalias.ValidateComponentType(f, set.ID); err != nil {
+		return nil, err
 	}
 
 	return f.createMetricsFunc(ctx, set, cfg)
@@ -160,8 +161,8 @@ func (f *factory) CreateLogs(ctx context.Context, set Settings, cfg component.Co
 		return nil, pipeline.ErrSignalNotSupported
 	}
 
-	if set.ID.Type() != f.Type() {
-		return nil, experr.ErrIDMismatch(set.ID, f.Type())
+	if err := componentalias.ValidateComponentType(f, set.ID); err != nil {
+		return nil, err
 	}
 
 	return f.createLogsFunc(ctx, set, cfg)
@@ -196,6 +197,7 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa
 	f := &factory{
 		cfgType:                 cfgType,
 		CreateDefaultConfigFunc: createDefaultConfig,
+		TypeAliasHolder:         componentalias.NewTypeAliasHolder(),
 	}
 	for _, opt := range options {
 		opt.applyOption(f)
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md
index 88f7b50bbff..e47d5e47d54 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md
@@ -36,7 +36,7 @@ Batch settings are available in the sending queue. Batching is disabled, by default. To enable batching with default
 batch settings, use `batch: {}`. When `batch` is defined, the settings are:
 
 - `flush_timeout` (default = 200 ms): time after which a batch will be sent regardless of its size. Must be a non-zero value;
-- `min_size` (default = 8192): the minimum size of a batch;
+- `min_size` (default = 8192): the minimum size of a batch; should be less than or equal to the `sending_queue::queue_size` if `sending_queue::batch::sizer` matches `sending_queue::sizer`.
 - `max_size` (default = 0): the maximum size of a batch, enables batch splitting. The maximum size of a batch should be greater than or equal to the minimum size of a batch. If set to zero, there is no maximum size;
 - `sizer`: see below.
@@ -116,7 +116,7 @@ receivers: protocols: grpc: exporters: - otlp: + otlp_grpc: endpoint: sending_queue: storage: file_storage/otc diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/config.schema.yaml b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/config.schema.yaml new file mode 100644 index 00000000000..e3f50a147b8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/config.schema.yaml @@ -0,0 +1,14 @@ +$defs: + batch_config: + description: BatchConfig defines a configuration for batching requests based on a timeout and a minimum number of items. + $ref: ./internal/queuebatch.batch_config + option: + description: Option apply changes to BaseExporter. + $ref: ./internal.option + queue_batch_config: + description: QueueBatchConfig defines configuration for queueing and batching for the exporter. + $ref: ./internal/queuebatch.config + request_sizer_type: + $ref: ./internal/request.sizer_type + timeout_config: + $ref: ./internal.timeout_config diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md index 9b00977f9dc..3bead1365ac 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md @@ -8,104 +8,128 @@ The following telemetry is emitted by this component. ### otelcol_exporter_enqueue_failed_log_records -Number of log records failed to be added to the sending queue. [alpha] +Number of log records failed to be added to the sending queue. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {records} | Sum | Int | true | alpha | +| {records} | Sum | Int | true | Alpha | ### otelcol_exporter_enqueue_failed_metric_points -Number of metric points failed to be added to the sending queue. [alpha] +Number of metric points failed to be added to the sending queue. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {datapoints} | Sum | Int | true | alpha | +| {datapoints} | Sum | Int | true | Alpha | + +### otelcol_exporter_enqueue_failed_profile_samples + +Number of profile samples failed to be added to the sending queue. + +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {samples} | Sum | Int | true | Development | ### otelcol_exporter_enqueue_failed_spans -Number of spans failed to be added to the sending queue. [alpha] +Number of spans failed to be added to the sending queue. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {spans} | Sum | Int | true | alpha | +| {spans} | Sum | Int | true | Alpha | ### otelcol_exporter_queue_batch_send_size Number of units in the batch -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {units} | Histogram | Int | +| Unit | Metric Type | Value Type | Stability | +| ---- | ----------- | ---------- | --------- | +| {units} | Histogram | Int | Development | ### otelcol_exporter_queue_batch_send_size_bytes Number of bytes in batch that was sent. Only available on detailed level. 
-| Unit | Metric Type | Value Type |
-| ---- | ----------- | ---------- |
-| By | Histogram | Int |
+| Unit | Metric Type | Value Type | Stability |
+| ---- | ----------- | ---------- | --------- |
+| By | Histogram | Int | Development |
 
 ### otelcol_exporter_queue_capacity
 
-Fixed capacity of the retry queue (in batches). [alpha]
+Fixed capacity of the retry queue (in batches).
 
 | Unit | Metric Type | Value Type | Stability |
 | ---- | ----------- | ---------- | --------- |
-| {batches} | Gauge | Int | alpha |
+| {batches} | Gauge | Int | Alpha |
 
 ### otelcol_exporter_queue_size
 
-Current size of the retry queue (in batches). [alpha]
+Current size of the retry queue (in batches).
 
 | Unit | Metric Type | Value Type | Stability |
 | ---- | ----------- | ---------- | --------- |
-| {batches} | Gauge | Int | alpha |
+| {batches} | Gauge | Int | Alpha |
 
 ### otelcol_exporter_send_failed_log_records
 
-Number of log records in failed attempts to send to destination. [alpha]
+Number of log records in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent.
 
 | Unit | Metric Type | Value Type | Monotonic | Stability |
 | ---- | ----------- | ---------- | --------- | --------- |
-| {records} | Sum | Int | true | alpha |
+| {records} | Sum | Int | true | Alpha |
 
 ### otelcol_exporter_send_failed_metric_points
 
-Number of metric points in failed attempts to send to destination. [alpha]
+Number of metric points in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent.
 
 | Unit | Metric Type | Value Type | Monotonic | Stability |
 | ---- | ----------- | ---------- | --------- | --------- |
-| {datapoints} | Sum | Int | true | alpha |
+| {datapoints} | Sum | Int | true | Alpha |
+
+### otelcol_exporter_send_failed_profile_samples
+
+Number of profile samples in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent.
+
+| Unit | Metric Type | Value Type | Monotonic | Stability |
+| ---- | ----------- | ---------- | --------- | --------- |
+| {samples} | Sum | Int | true | Development |
 
 ### otelcol_exporter_send_failed_spans
 
-Number of spans in failed attempts to send to destination. [alpha]
+Number of spans in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent.
 
 | Unit | Metric Type | Value Type | Monotonic | Stability |
 | ---- | ----------- | ---------- | --------- | --------- |
-| {spans} | Sum | Int | true | alpha |
+| {spans} | Sum | Int | true | Alpha |
 
 ### otelcol_exporter_sent_log_records
 
-Number of log record successfully sent to destination. [alpha]
+Number of log records successfully sent to destination.
 
 | Unit | Metric Type | Value Type | Monotonic | Stability |
 | ---- | ----------- | ---------- | --------- | --------- |
-| {records} | Sum | Int | true | alpha |
+| {records} | Sum | Int | true | Alpha |
 
 ### otelcol_exporter_sent_metric_points
 
-Number of metric points successfully sent to destination. [alpha]
+Number of metric points successfully sent to destination.
+
+| Unit | Metric Type | Value Type | Monotonic | Stability |
+| ---- | ----------- | ---------- | --------- | --------- |
+| {datapoints} | Sum | Int | true | Alpha |
+
+### otelcol_exporter_sent_profile_samples
+
+Number of profile samples successfully sent to destination.
| Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {datapoints} | Sum | Int | true | alpha | +| {samples} | Sum | Int | true | Development | ### otelcol_exporter_sent_spans -Number of spans successfully sent to destination. [alpha] +Number of spans successfully sent to destination. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {spans} | Sum | Int | true | alpha | +| {spans} | Sum | Int | true | Alpha | diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go index 147ff280a7e..884ba46ca33 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go @@ -11,6 +11,7 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configoptional" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" @@ -47,7 +48,7 @@ type BaseExporter struct { retryCfg configretry.BackOffConfig queueBatchSettings queuebatch.Settings[request.Request] - queueCfg queuebatch.Config + queueCfg configoptional.Optional[queuebatch.Config] } func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, pusher sender.SendFunc[request.Request], options ...Option) (*BaseExporter, error) { @@ -82,19 +83,19 @@ func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, pusher sende return nil, err } - if be.queueCfg.Batch.HasValue() { + if be.queueCfg.HasValue() && be.queueCfg.Get().Batch.HasValue() { // Batcher mutates the data. be.ConsumerOptions = append(be.ConsumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) } - if be.queueCfg.Enabled { + if be.queueCfg.HasValue() { qSet := queuebatch.AllSettings[request.Request]{ Settings: be.queueBatchSettings, Signal: signal, ID: set.ID, Telemetry: set.TelemetrySettings, } - be.QueueSender, err = NewQueueSender(qSet, be.queueCfg, be.ExportFailureMessage, be.firstSender) + be.QueueSender, err = NewQueueSender(qSet, *be.queueCfg.Get(), be.ExportFailureMessage, be.firstSender) if err != nil { return nil, err } @@ -191,7 +192,7 @@ func WithRetry(config configretry.BackOffConfig) Option { // WithQueue overrides the default queuebatch.Config for an exporter. // The default queuebatch.Config is to disable queueing. // This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. -func WithQueue(cfg queuebatch.Config) Option { +func WithQueue(cfg configoptional.Optional[queuebatch.Config]) Option { return func(o *BaseExporter) error { if o.queueBatchSettings.Encoding == nil { return errors.New("WithQueue option is not available for the new request exporters, use WithQueueBatch instead") @@ -204,13 +205,13 @@ func WithQueue(cfg queuebatch.Config) Option { // This option should be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go
index 147ff280a7e..884ba46ca33 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go
@@ -11,6 +11,7 @@ import (
	"go.uber.org/zap"

	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configoptional"
	"go.opentelemetry.io/collector/config/configretry"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/exporter"
@@ -47,7 +48,7 @@ type BaseExporter struct {
	retryCfg configretry.BackOffConfig

	queueBatchSettings queuebatch.Settings[request.Request]
-	queueCfg queuebatch.Config
+	queueCfg configoptional.Optional[queuebatch.Config]
}

func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, pusher sender.SendFunc[request.Request], options ...Option) (*BaseExporter, error) {
@@ -82,19 +83,19 @@ func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, pusher sende
		return nil, err
	}

-	if be.queueCfg.Batch.HasValue() {
+	if be.queueCfg.HasValue() && be.queueCfg.Get().Batch.HasValue() {
		// Batcher mutates the data.
		be.ConsumerOptions = append(be.ConsumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true}))
	}

-	if be.queueCfg.Enabled {
+	if be.queueCfg.HasValue() {
		qSet := queuebatch.AllSettings[request.Request]{
			Settings: be.queueBatchSettings,
			Signal: signal,
			ID: set.ID,
			Telemetry: set.TelemetrySettings,
		}
-		be.QueueSender, err = NewQueueSender(qSet, be.queueCfg, be.ExportFailureMessage, be.firstSender)
+		be.QueueSender, err = NewQueueSender(qSet, *be.queueCfg.Get(), be.ExportFailureMessage, be.firstSender)
		if err != nil {
			return nil, err
		}
@@ -191,7 +192,7 @@ func WithRetry(config configretry.BackOffConfig) Option {
// WithQueue overrides the default queuebatch.Config for an exporter.
// The default queuebatch.Config is to disable queueing.
// This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter.
-func WithQueue(cfg queuebatch.Config) Option {
+func WithQueue(cfg configoptional.Optional[queuebatch.Config]) Option {
	return func(o *BaseExporter) error {
		if o.queueBatchSettings.Encoding == nil {
			return errors.New("WithQueue option is not available for the new request exporters, use WithQueueBatch instead")
@@ -204,13 +205,13 @@ func WithQueue(cfg queuebatch.Config) Option {
// This option should be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter.
// Experimental: This API is at the early stage of development and may change without backward compatibility
// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
-func WithQueueBatch(cfg queuebatch.Config, set queuebatch.Settings[request.Request]) Option {
+func WithQueueBatch(cfg configoptional.Optional[queuebatch.Config], set queuebatch.Settings[request.Request]) Option {
	return func(o *BaseExporter) error {
-		if !cfg.Enabled {
+		if !cfg.HasValue() {
			o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures."
			return nil
		}
-		if cfg.StorageID != nil && set.Encoding == nil {
+		if cfg.Get().StorageID != nil && set.Encoding == nil {
			return errors.New("`Settings.Encoding` must not be nil when persistent queue is enabled")
		}
		o.queueBatchSettings = set
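The recurring pattern in this hunk is the replacement of the Enabled flag with configoptional.Optional, where an absent value means disabled. A minimal sketch of the read pattern used above, with a stand-in payload type since queuebatch is internal:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configoptional"
)

// queueSettings is a hypothetical stand-in for the internal queuebatch.Config.
type queueSettings struct {
	QueueSize int
}

func main() {
	// None replaces Enabled:false, Some replaces Enabled:true.
	disabled := configoptional.None[queueSettings]()
	enabled := configoptional.Some(queueSettings{QueueSize: 1000})

	for _, cfg := range []configoptional.Optional[queueSettings]{disabled, enabled} {
		if cfg.HasValue() {
			// Get returns a pointer to the wrapped value, hence the dereference
			// in NewQueueSender(qSet, *be.queueCfg.Get(), ...) above.
			fmt.Println("queue enabled, size:", cfg.Get().QueueSize)
		} else {
			fmt.Println("queue disabled")
		}
	}
}
```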
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/config.schema.yaml b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/config.schema.yaml
new file mode 100644
index 00000000000..91712d54f97
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/config.schema.yaml
@@ -0,0 +1,10 @@
+$defs:
+  timeout_config:
+    description: TimeoutConfig for timeout. The timeout applies to individual attempts to send data to the backend.
+    type: object
+    properties:
+      timeout:
+        description: Timeout is the timeout for every attempt to send data to the backend. A zero timeout means no timeout.
+        type: string
+        x-customType: time.Duration
+        format: duration
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go
index 66114dd2d3c..b4da8c27a14 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go
@@ -25,22 +25,25 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer {
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
-	meter metric.Meter
-	mu sync.Mutex
-	registrations []metric.Registration
-	ExporterEnqueueFailedLogRecords metric.Int64Counter
-	ExporterEnqueueFailedMetricPoints metric.Int64Counter
-	ExporterEnqueueFailedSpans metric.Int64Counter
-	ExporterQueueBatchSendSize metric.Int64Histogram
-	ExporterQueueBatchSendSizeBytes metric.Int64Histogram
-	ExporterQueueCapacity metric.Int64ObservableGauge
-	ExporterQueueSize metric.Int64ObservableGauge
-	ExporterSendFailedLogRecords metric.Int64Counter
-	ExporterSendFailedMetricPoints metric.Int64Counter
-	ExporterSendFailedSpans metric.Int64Counter
-	ExporterSentLogRecords metric.Int64Counter
-	ExporterSentMetricPoints metric.Int64Counter
-	ExporterSentSpans metric.Int64Counter
+	meter metric.Meter
+	mu sync.Mutex
+	registrations []metric.Registration
+	ExporterEnqueueFailedLogRecords metric.Int64Counter
+	ExporterEnqueueFailedMetricPoints metric.Int64Counter
+	ExporterEnqueueFailedProfileSamples metric.Int64Counter
+	ExporterEnqueueFailedSpans metric.Int64Counter
+	ExporterQueueBatchSendSize metric.Int64Histogram
+	ExporterQueueBatchSendSizeBytes metric.Int64Histogram
+	ExporterQueueCapacity metric.Int64ObservableGauge
+	ExporterQueueSize metric.Int64ObservableGauge
+	ExporterSendFailedLogRecords metric.Int64Counter
+	ExporterSendFailedMetricPoints metric.Int64Counter
+	ExporterSendFailedProfileSamples metric.Int64Counter
+	ExporterSendFailedSpans metric.Int64Counter
+	ExporterSentLogRecords metric.Int64Counter
+	ExporterSentMetricPoints metric.Int64Counter
+	ExporterSentProfileSamples metric.Int64Counter
+	ExporterSentSpans metric.Int64Counter
}

// TelemetryBuilderOption applies changes to default builder.
@@ -114,81 +117,99 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...Teleme
	var err, errs error
	builder.ExporterEnqueueFailedLogRecords, err = builder.meter.Int64Counter(
		"otelcol_exporter_enqueue_failed_log_records",
-		metric.WithDescription("Number of log records failed to be added to the sending queue. [alpha]"),
+		metric.WithDescription("Number of log records failed to be added to the sending queue. [Alpha]"),
		metric.WithUnit("{records}"),
	)
	errs = errors.Join(errs, err)
	builder.ExporterEnqueueFailedMetricPoints, err = builder.meter.Int64Counter(
		"otelcol_exporter_enqueue_failed_metric_points",
-		metric.WithDescription("Number of metric points failed to be added to the sending queue. [alpha]"),
+		metric.WithDescription("Number of metric points failed to be added to the sending queue. [Alpha]"),
		metric.WithUnit("{datapoints}"),
	)
	errs = errors.Join(errs, err)
+	builder.ExporterEnqueueFailedProfileSamples, err = builder.meter.Int64Counter(
+		"otelcol_exporter_enqueue_failed_profile_samples",
+		metric.WithDescription("Number of profile samples failed to be added to the sending queue. [Development]"),
+		metric.WithUnit("{samples}"),
+	)
+	errs = errors.Join(errs, err)
	builder.ExporterEnqueueFailedSpans, err = builder.meter.Int64Counter(
		"otelcol_exporter_enqueue_failed_spans",
-		metric.WithDescription("Number of spans failed to be added to the sending queue. [alpha]"),
+		metric.WithDescription("Number of spans failed to be added to the sending queue. [Alpha]"),
		metric.WithUnit("{spans}"),
	)
	errs = errors.Join(errs, err)
	builder.ExporterQueueBatchSendSize, err = builder.meter.Int64Histogram(
		"otelcol_exporter_queue_batch_send_size",
-		metric.WithDescription("Number of units in the batch"),
+		metric.WithDescription("Number of units in the batch [Development]"),
		metric.WithUnit("{units}"),
		metric.WithExplicitBucketBoundaries([]float64{10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000, 100000}...),
	)
	errs = errors.Join(errs, err)
	builder.ExporterQueueBatchSendSizeBytes, err = builder.meter.Int64Histogram(
		"otelcol_exporter_queue_batch_send_size_bytes",
-		metric.WithDescription("Number of bytes in batch that was sent. Only available on detailed level."),
+		metric.WithDescription("Number of bytes in batch that was sent. Only available on detailed level. [Development]"),
		metric.WithUnit("By"),
		metric.WithExplicitBucketBoundaries([]float64{10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000}...),
	)
	errs = errors.Join(errs, err)
	builder.ExporterQueueCapacity, err = builder.meter.Int64ObservableGauge(
		"otelcol_exporter_queue_capacity",
-		metric.WithDescription("Fixed capacity of the retry queue (in batches). [alpha]"),
+		metric.WithDescription("Fixed capacity of the retry queue (in batches). [Alpha]"),
		metric.WithUnit("{batches}"),
	)
	errs = errors.Join(errs, err)
	builder.ExporterQueueSize, err = builder.meter.Int64ObservableGauge(
		"otelcol_exporter_queue_size",
-		metric.WithDescription("Current size of the retry queue (in batches). [alpha]"),
+		metric.WithDescription("Current size of the retry queue (in batches). [Alpha]"),
		metric.WithUnit("{batches}"),
	)
	errs = errors.Join(errs, err)
	builder.ExporterSendFailedLogRecords, err = builder.meter.Int64Counter(
		"otelcol_exporter_send_failed_log_records",
-		metric.WithDescription("Number of log records in failed attempts to send to destination. [alpha]"),
+		metric.WithDescription("Number of log records in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent. [Alpha]"),
		metric.WithUnit("{records}"),
	)
	errs = errors.Join(errs, err)
	builder.ExporterSendFailedMetricPoints, err = builder.meter.Int64Counter(
		"otelcol_exporter_send_failed_metric_points",
-		metric.WithDescription("Number of metric points in failed attempts to send to destination. [alpha]"),
+		metric.WithDescription("Number of metric points in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent. [Alpha]"),
		metric.WithUnit("{datapoints}"),
	)
	errs = errors.Join(errs, err)
+	builder.ExporterSendFailedProfileSamples, err = builder.meter.Int64Counter(
+		"otelcol_exporter_send_failed_profile_samples",
+		metric.WithDescription("Number of profile samples in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent. [Development]"),
+		metric.WithUnit("{samples}"),
+	)
+	errs = errors.Join(errs, err)
	builder.ExporterSendFailedSpans, err = builder.meter.Int64Counter(
		"otelcol_exporter_send_failed_spans",
-		metric.WithDescription("Number of spans in failed attempts to send to destination. [alpha]"),
+		metric.WithDescription("Number of spans in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent. [Alpha]"),
		metric.WithUnit("{spans}"),
	)
	errs = errors.Join(errs, err)
	builder.ExporterSentLogRecords, err = builder.meter.Int64Counter(
		"otelcol_exporter_sent_log_records",
-		metric.WithDescription("Number of log record successfully sent to destination. [alpha]"),
+		metric.WithDescription("Number of log record successfully sent to destination. [Alpha]"),
		metric.WithUnit("{records}"),
	)
	errs = errors.Join(errs, err)
	builder.ExporterSentMetricPoints, err = builder.meter.Int64Counter(
		"otelcol_exporter_sent_metric_points",
-		metric.WithDescription("Number of metric points successfully sent to destination. [alpha]"),
+		metric.WithDescription("Number of metric points successfully sent to destination. [Alpha]"),
		metric.WithUnit("{datapoints}"),
	)
	errs = errors.Join(errs, err)
+	builder.ExporterSentProfileSamples, err = builder.meter.Int64Counter(
+		"otelcol_exporter_sent_profile_samples",
+		metric.WithDescription("Number of profile samples successfully sent to destination. [Development]"),
+		metric.WithUnit("{samples}"),
+	)
+	errs = errors.Join(errs, err)
	builder.ExporterSentSpans, err = builder.meter.Int64Counter(
		"otelcol_exporter_sent_spans",
-		metric.WithDescription("Number of spans successfully sent to destination. [alpha]"),
+		metric.WithDescription("Number of spans successfully sent to destination. [Alpha]"),
		metric.WithUnit("{spans}"),
	)
	errs = errors.Join(errs, err)
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obs_report_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obs_report_sender.go
index 3438863ff51..db0538c8e87 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obs_report_sender.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obs_report_sender.go
@@ -5,19 +5,26 @@ package internal // import "go.opentelemetry.io/collector/exporter/exporterhelpe

import (
	"context"
+	"errors"

	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
+	otelcodes "go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/metric"
+	semconv "go.opentelemetry.io/otel/semconv/v1.38.0"
	"go.opentelemetry.io/otel/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"

	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/consumererror"
	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/sender"
	"go.opentelemetry.io/collector/pipeline"
+	"go.opentelemetry.io/collector/pipeline/xpipeline"
)

const (
@@ -34,6 +41,9 @@ const (
	ItemsSent = "items.sent"
	// ItemsFailed used to track number of items that failed to be sent by exporters.
	ItemsFailed = "items.failed"
+
+	// ErrorPermanentKey indicates whether the error is permanent (non-retryable).
+	ErrorPermanentKey = "error.permanent"
)

type obsReportSender[K request.Request] struct {
@@ -78,6 +88,9 @@ func newObsReportSender[K request.Request](set exporter.Settings, signal pipelin
	case pipeline.SignalLogs:
		or.itemsSentInst = telemetryBuilder.ExporterSentLogRecords
		or.itemsFailedInst = telemetryBuilder.ExporterSendFailedLogRecords
+	case xpipeline.SignalProfiles:
+		or.itemsSentInst = telemetryBuilder.ExporterSentProfileSamples
+		or.itemsFailedInst = telemetryBuilder.ExporterSendFailedProfileSamples
	}

	return or, nil
@@ -105,16 +118,16 @@ func (ors *obsReportSender[K]) startOp(ctx context.Context) context.Context {
}

// EndOp completes the export operation that was started with StartOp.
-func (ors *obsReportSender[K]) endOp(ctx context.Context, numLogRecords int, err error) {
-	numSent, numFailedToSend := toNumItems(numLogRecords, err)
+func (ors *obsReportSender[K]) endOp(ctx context.Context, numRecords int, err error) {
+	numSent, numFailedToSend := toNumItems(numRecords, err)

-	// No metrics recorded for profiles.
	if ors.itemsSentInst != nil {
		ors.itemsSentInst.Add(ctx, numSent, ors.metricAttr)
	}
-	// No metrics recorded for profiles.
-	if ors.itemsFailedInst != nil {
-		ors.itemsFailedInst.Add(ctx, numFailedToSend, ors.metricAttr)
+
+	if ors.itemsFailedInst != nil && numFailedToSend > 0 {
+		withFailedAttrs := metric.WithAttributeSet(extractFailureAttributes(err))
+		ors.itemsFailedInst.Add(ctx, numFailedToSend, ors.metricAttr, withFailedAttrs)
	}

	span := trace.SpanFromContext(ctx)
@@ -126,7 +139,7 @@ func (ors *obsReportSender[K]) endOp(ctx context.Context, numLogRecords int, err
			attribute.Int64(ItemsFailed, numFailedToSend),
		)
		if err != nil {
-			span.SetStatus(codes.Error, err.Error())
+			span.SetStatus(otelcodes.Error, err.Error())
		}
	}
}
@@ -137,3 +150,38 @@ func toNumItems(numExportedItems int, err error) (int64, int64) {
	}
	return int64(numExportedItems), 0
}
+
+func extractFailureAttributes(err error) attribute.Set {
+	if err == nil {
+		return attribute.NewSet()
+	}
+
+	attrs := []attribute.KeyValue{}
+
+	errorType := determineErrorType(err)
+	attrs = append(attrs, attribute.String(string(semconv.ErrorTypeKey), errorType))
+
+	isPermanent := consumererror.IsPermanent(err)
+	attrs = append(attrs, attribute.Bool(ErrorPermanentKey, isPermanent))
+
+	return attribute.NewSet(attrs...)
+}
+
+func determineErrorType(err error) string {
+	if experr.IsShutdownErr(err) {
+		return "Shutdown"
+	}
+
+	if errors.Is(err, context.Canceled) {
+		return "Canceled"
+	}
+	if errors.Is(err, context.DeadlineExceeded) {
+		return "Deadline_Exceeded"
+	}
+
+	if st, ok := status.FromError(err); ok && st.Code() != codes.OK {
+		return st.Code().String()
+	}
+
+	return "_OTHER"
+}
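A runnable reproduction of the classification in determineErrorType above, minus the internal shutdown check, showing what lands in error.type for a few representative errors:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classify mirrors the shape of determineErrorType in the hunk above.
func classify(err error) string {
	if errors.Is(err, context.Canceled) {
		return "Canceled"
	}
	if errors.Is(err, context.DeadlineExceeded) {
		return "Deadline_Exceeded"
	}
	// Non-gRPC errors return ok=false here and fall through to "_OTHER".
	if st, ok := status.FromError(err); ok && st.Code() != codes.OK {
		return st.Code().String()
	}
	return "_OTHER"
}

func main() {
	fmt.Println(classify(context.DeadlineExceeded))                      // Deadline_Exceeded
	fmt.Println(classify(status.Error(codes.ResourceExhausted, "full"))) // ResourceExhausted
	fmt.Println(classify(errors.New("socket reset")))                    // _OTHER
}
```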
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/async_queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/async_queue.go
index 99304dd3774..d5e8a1618e2 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/async_queue.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/async_queue.go
@@ -36,11 +36,9 @@ func (qc *asyncQueue[T]) Start(ctx context.Context, host component.Host) error {
	}
	var startWG sync.WaitGroup
	for i := 0; i < qc.numConsumers; i++ {
-		qc.stopWG.Add(1)
		startWG.Add(1)
-		go func() { //nolint:contextcheck
+		qc.stopWG.Go(func() { //nolint:contextcheck
			startWG.Done()
-			defer qc.stopWG.Done()
			for {
				ctx, req, done, ok := qc.Read(context.Background())
				if !ok {
@@ -51,7 +49,7 @@ func (qc *asyncQueue[T]) Start(ctx context.Context, host component.Host) error {
				qc.refCounter.Unref(req)
			}
		}
-		}()
+		})
	}
	startWG.Wait()
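This and several later hunks move goroutine launches to sync.WaitGroup.Go, added in Go 1.25, which folds the Add(1)/defer Done() bookkeeping into the call. A runnable sketch of the pattern, assuming a Go 1.25+ toolchain:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		// wg.Go increments the counter, runs the function in a new
		// goroutine, and marks it done when the function returns.
		wg.Go(func() {
			fmt.Println("consumer", i, "finished")
		})
	}
	wg.Wait()
}
```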
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/meta.pb.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/meta.pb.go
index a89fa718fee..23c68d4a9ab 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/meta.pb.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/meta.pb.go
@@ -1,33 +1,34 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
// source: exporter/exporterhelper/internal/queue/meta.proto

package queue

import (
-	encoding_binary "encoding/binary"
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
+	reflect "reflect"
+	sync "sync"

-	proto "github.com/gogo/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)

-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)

// PersistentMetadata holds all persistent metadata for the queue.
// The items and bytes sizes are recorded explicitly,
// the requests size can be calculated as (write_index - read_index + len(currently_dispatched_items)).
type PersistentMetadata struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
	// Current total items size of the queue.
	ItemsSize int64 `protobuf:"fixed64,1,opt,name=items_size,json=itemsSize,proto3" json:"items_size,omitempty"`
	// Current total bytes size of the queue.
@@ -40,424 +41,163 @@ type PersistentMetadata struct {
	CurrentlyDispatchedItems []uint64 `protobuf:"fixed64,5,rep,packed,name=currently_dispatched_items,json=currentlyDispatchedItems,proto3" json:"currently_dispatched_items,omitempty"`
}

-func (m *PersistentMetadata) Reset() { *m = PersistentMetadata{} }
-func (m *PersistentMetadata) String() string { return proto.CompactTextString(m) }
-func (*PersistentMetadata) ProtoMessage() {}
-func (*PersistentMetadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_8ad1b29fca6d4f37, []int{0}
-}
-func (m *PersistentMetadata) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *PersistentMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_PersistentMetadata.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
+func (x *PersistentMetadata) Reset() {
+	*x = PersistentMetadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_exporter_exporterhelper_internal_queue_meta_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
	}
}
-func (m *PersistentMetadata) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PersistentMetadata.Merge(m, src)
-}
-func (m *PersistentMetadata) XXX_Size() int {
-	return m.Size()
-}
-func (m *PersistentMetadata) XXX_DiscardUnknown() {
-	xxx_messageInfo_PersistentMetadata.DiscardUnknown(m)
+
+func (x *PersistentMetadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
}

-var xxx_messageInfo_PersistentMetadata proto.InternalMessageInfo
+func (*PersistentMetadata) ProtoMessage() {}

-func (m *PersistentMetadata) GetItemsSize() int64 {
-	if m != nil {
-		return m.ItemsSize
+func (x *PersistentMetadata) ProtoReflect() protoreflect.Message {
+	mi := &file_exporter_exporterhelper_internal_queue_meta_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
	}
-	return 0
+	return mi.MessageOf(x)
}

-func (m *PersistentMetadata) GetBytesSize() int64 {
-	if m != nil {
-		return m.BytesSize
-	}
-	return 0
+// Deprecated: Use PersistentMetadata.ProtoReflect.Descriptor instead.
+func (*PersistentMetadata) Descriptor() ([]byte, []int) {
+	return file_exporter_exporterhelper_internal_queue_meta_proto_rawDescGZIP(), []int{0}
}

-func (m *PersistentMetadata) GetReadIndex() uint64 {
-	if m != nil {
-		return m.ReadIndex
+func (x *PersistentMetadata) GetItemsSize() int64 {
+	if x != nil {
+		return x.ItemsSize
	}
	return 0
}

-func (m *PersistentMetadata) GetWriteIndex() uint64 {
-	if m != nil {
-		return m.WriteIndex
+func (x *PersistentMetadata) GetBytesSize() int64 {
+	if x != nil {
+		return x.BytesSize
	}
	return 0
}

-func (m *PersistentMetadata) GetCurrentlyDispatchedItems() []uint64 {
-	if m != nil {
-		return m.CurrentlyDispatchedItems
+func (x *PersistentMetadata) GetReadIndex() uint64 {
+	if x != nil {
+		return x.ReadIndex
	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*PersistentMetadata)(nil), "opentelemetry.collector.exporter.exporterhelper.internal.queue.PersistentMetadata")
-}
-
-func init() {
-	proto.RegisterFile("exporter/exporterhelper/internal/queue/meta.proto", fileDescriptor_8ad1b29fca6d4f37)
-}
-
-var fileDescriptor_8ad1b29fca6d4f37 = []byte{
-	// 287 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xb1, 0x4a, 0x03, 0x41,
-	0x14, 0x45, 0x33, 0x46, 0x03, 0x19, 0x1b, 0xd9, 0x6a, 0x11, 0x5c, 0x83, 0x55, 0xaa, 0x59, 0xc4,
-	0x56, 0x2c, 0x24, 0x4d, 0x0a, 0x41, 0x62, 0x67, 0xe1, 0x32, 0xd9, 0xbd, 0x98, 0x81, 0xc9, 0xcc,
-	0xfa, 0xf6, 0x05, 0x93, 0x7c, 0x85, 0x9f, 0x65, 0x99, 0xd2, 0x52, 0x92, 0x1f, 0x91, 0x9d, 0x64,
-	0x17, 0xd2, 0xd9, 0x3d, 0xce, 0xb9, 0x5c, 0x1e, 0x57, 0xde, 0x62, 0x59, 0x7a, 0x62, 0x50, 0xda,
-	0x1c, 0x33, 0xd8, 0x12, 0x94, 0x1a, 0xc7, 0x20, 0xa7, 0x6d, 0xfa, 0xb1, 0xc0, 0x02, 0xe9, 0x1c,
-	0xac, 0x55, 0x49, 0x9e, 0x7d, 0xf4, 0xe0, 0x4b, 0x38, 0x86, 0xc5, 0x1c, 0x4c, 0x2b, 0x95, 0x7b,
-	0x6b, 0x91, 0xb3, 0x27, 0xd5, 0x34, 0xa8, 0xe3, 0x2a, 0xd5, 0x54, 0xa9, 0x50, 0x75, 0xb3, 0x11,
-	0x32, 0x7a, 0x06, 0x55, 0xa6, 0x62, 0x38, 0x7e, 0x02, 0xeb, 0x42, 0xb3, 0x8e, 0xae, 0xa4, 0x34,
-	0x8c, 0x79, 0x95, 0x55, 0x66, 0x8d, 0x58, 0x0c, 0xc4, 0xf0, 0x62, 0xd2, 0x0f, 0xe4, 0xc5, 0xac,
-	0x51, 0xeb, 0xe9, 0x8a, 0x71, 0xd0, 0x27, 0x7b, 0x1d, 0x48, 0xa3, 0x09, 0xba, 0xc8, 0x8c, 0x2b,
-	0xb0, 0x8c, 0xbb, 0x03, 0x31, 0xec, 0x4d, 0xfa, 0x35, 0x19, 0xd7, 0x20, 0xba, 0x96, 0xe7, 0x9f,
-	0x64, 0x18, 0x07, 0x7f, 0x1a, 0xbc, 0x0c, 0x68, 0x1f, 0xb8, 0x97, 0x97, 0xf9, 0x82, 0x08, 0x8e,
-	0xed, 0x2a, 0x2b, 0x4c, 0x55, 0x6a, 0xce, 0x67, 0x28, 0xb2, 0xf0, 0x40, 0x7c, 0x36, 0xe8, 0x0e,
-	0x7b, 0x93, 0xb8, 0x4d, 0x8c, 0xda, 0xc0, 0xb8, 0xf6, 0x8f, 0x6f, 0xdf, 0xdb, 0x44, 0x6c, 0xb6,
-	0x89, 0xf8, 0xdd, 0x26, 0xe2, 0x6b, 0x97, 0x74, 0x36, 0xbb, 0xa4, 0xf3, 0xb3, 0x4b, 0x3a, 0xaf,
-	0xa3, 0x77, 0xaf, 0x8e, 0xf7, 0x32, 0x3e, 0x6d, 0x27, 0x4b, 0xff, 0xb7, 0xfe, 0xb4, 0x17, 0x96,
-	0xbf, 0xfb, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x15, 0x90, 0x67, 0x5e, 0xae, 0x01, 0x00, 0x00,
+	return 0
}

-func (m *PersistentMetadata) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
+func (x *PersistentMetadata) GetWriteIndex() uint64 {
+	if x != nil {
+		return x.WriteIndex
	}
-	return dAtA[:n], nil
-}
-
-func (m *PersistentMetadata) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
+	return 0
}

-func (m *PersistentMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.CurrentlyDispatchedItems) > 0 {
-		for iNdEx := len(m.CurrentlyDispatchedItems) - 1; iNdEx >= 0; iNdEx-- {
-			i -= 8
-			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.CurrentlyDispatchedItems[iNdEx]))
-		}
-		i = encodeVarintMeta(dAtA, i, uint64(len(m.CurrentlyDispatchedItems)*8))
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.WriteIndex != 0 {
-		i -= 8
-		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.WriteIndex))
-		i--
-		dAtA[i] = 0x21
-	}
-	if m.ReadIndex != 0 {
-		i -= 8
-		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ReadIndex))
-		i--
-		dAtA[i] = 0x19
-	}
-	if m.BytesSize != 0 {
-		i -= 8
-		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BytesSize))
-		i--
-		dAtA[i] = 0x11
+func (x *PersistentMetadata) GetCurrentlyDispatchedItems() []uint64 {
+	if x != nil {
+		return x.CurrentlyDispatchedItems
	}
-	if m.ItemsSize != 0 {
-		i -= 8
-		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ItemsSize))
-		i--
-		dAtA[i] = 0x9
-	}
-	return len(dAtA) - i, nil
+	return nil
}

-func encodeVarintMeta(dAtA []byte, offset int, v uint64) int {
-	offset -= sovMeta(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *PersistentMetadata) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ItemsSize != 0 {
-		n += 9
-	}
-	if m.BytesSize != 0 {
-		n += 9
-	}
-	if m.ReadIndex != 0 {
-		n += 9
-	}
-	if m.WriteIndex != 0 {
-		n += 9
-	}
-	if len(m.CurrentlyDispatchedItems) > 0 {
-		n += 1 + sovMeta(uint64(len(m.CurrentlyDispatchedItems)*8)) + len(m.CurrentlyDispatchedItems)*8
-	}
-	return n
+var File_exporter_exporterhelper_internal_queue_meta_proto protoreflect.FileDescriptor
+
+var file_exporter_exporterhelper_internal_queue_meta_proto_rawDesc = []byte{
+	0x0a, 0x31, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6f, 0x72,
+	0x74, 0x65, 0x72, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x2f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x12, 0x3e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x65, 0x78, 0x70,
+	0x6f, 0x72, 0x74, 0x65, 0x72, 0x2e, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x68, 0x65,
+	0x6c, 0x70, 0x65, 0x72, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x71, 0x75,
+	0x65, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x12, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65,
+	0x6e, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x74,
+	0x65, 0x6d, 0x73, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x10, 0x52, 0x09,
+	0x69, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x79, 0x74,
+	0x65, 0x73, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x10, 0x52, 0x09, 0x62,
+	0x79, 0x74, 0x65, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64,
+	0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x09, 0x72, 0x65,
+	0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65,
+	0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0a, 0x77, 0x72,
+	0x69, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x3c, 0x0a, 0x1a, 0x63, 0x75, 0x72, 0x72,
+	0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64,
+	0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x06, 0x52, 0x18, 0x63, 0x75,
+	0x72, 0x72, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65,
+	0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x63, 0x6f,
+	0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72,
+	0x2f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f,
+	0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

-func sovMeta(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozMeta(x uint64) (n int) {
-	return sovMeta(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *PersistentMetadata) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMeta
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PersistentMetadata: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PersistentMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ItemsSize", wireType)
-			}
-			m.ItemsSize = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.ItemsSize = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field BytesSize", wireType)
-			}
-			m.BytesSize = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.BytesSize = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 3:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ReadIndex", wireType)
-			}
-			m.ReadIndex = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.ReadIndex = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 4:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field WriteIndex", wireType)
-			}
-			m.WriteIndex = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.WriteIndex = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 5:
-			if wireType == 1 {
-				var v uint64
-				if (iNdEx + 8) > l {
-					return io.ErrUnexpectedEOF
-				}
-				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-				iNdEx += 8
-				m.CurrentlyDispatchedItems = append(m.CurrentlyDispatchedItems, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowMeta
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthMeta
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthMeta
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				elementCount = packedLen / 8
-				if elementCount != 0 && len(m.CurrentlyDispatchedItems) == 0 {
-					m.CurrentlyDispatchedItems = make([]uint64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					if (iNdEx + 8) > l {
-						return io.ErrUnexpectedEOF
-					}
-					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-					iNdEx += 8
-					m.CurrentlyDispatchedItems = append(m.CurrentlyDispatchedItems, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field CurrentlyDispatchedItems", wireType)
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMeta(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMeta
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
+var (
+	file_exporter_exporterhelper_internal_queue_meta_proto_rawDescOnce sync.Once
+	file_exporter_exporterhelper_internal_queue_meta_proto_rawDescData = file_exporter_exporterhelper_internal_queue_meta_proto_rawDesc
+)

-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipMeta(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	depth := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowMeta
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
+func file_exporter_exporterhelper_internal_queue_meta_proto_rawDescGZIP() []byte {
+	file_exporter_exporterhelper_internal_queue_meta_proto_rawDescOnce.Do(func() {
+		file_exporter_exporterhelper_internal_queue_meta_proto_rawDescData = protoimpl.X.CompressGZIP(file_exporter_exporterhelper_internal_queue_meta_proto_rawDescData)
+	})
+	return file_exporter_exporterhelper_internal_queue_meta_proto_rawDescData
+}
+
+var file_exporter_exporterhelper_internal_queue_meta_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_exporter_exporterhelper_internal_queue_meta_proto_goTypes = []interface{}{
+	(*PersistentMetadata)(nil), // 0: opentelemetry.collector.exporter.exporterhelper.internal.queue.PersistentMetadata
+}
+var file_exporter_exporterhelper_internal_queue_meta_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_exporter_exporterhelper_internal_queue_meta_proto_init() }
+func file_exporter_exporterhelper_internal_queue_meta_proto_init() {
+	if File_exporter_exporterhelper_internal_queue_meta_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_exporter_exporterhelper_internal_queue_meta_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PersistentMetadata); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
			}
		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowMeta
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-		case 1:
-			iNdEx += 8
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowMeta
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthMeta
-			}
-			iNdEx += length
-		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupMeta
-			}
-			depth--
-		case 5:
-			iNdEx += 4
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthMeta
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
	}
-	return 0, io.ErrUnexpectedEOF
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_exporter_exporterhelper_internal_queue_meta_proto_rawDesc,
+			NumEnums: 0,
+			NumMessages: 1,
+			NumExtensions: 0,
+			NumServices: 0,
+		},
+		GoTypes: file_exporter_exporterhelper_internal_queue_meta_proto_goTypes,
+		DependencyIndexes: file_exporter_exporterhelper_internal_queue_meta_proto_depIdxs,
+		MessageInfos: file_exporter_exporterhelper_internal_queue_meta_proto_msgTypes,
+	}.Build()
+	File_exporter_exporterhelper_internal_queue_meta_proto = out.File
+	file_exporter_exporterhelper_internal_queue_meta_proto_rawDesc = nil
+	file_exporter_exporterhelper_internal_queue_meta_proto_goTypes = nil
+	file_exporter_exporterhelper_internal_queue_meta_proto_depIdxs = nil
}
-
-var (
-	ErrInvalidLengthMeta = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowMeta = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupMeta = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/obs_queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/obs_queue.go
index 8cad6962067..8900d83d709 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/obs_queue.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/obs_queue.go
@@ -13,6 +13,7 @@ import (
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
	"go.opentelemetry.io/collector/pipeline"
+	"go.opentelemetry.io/collector/pipeline/xpipeline"
)

const (
@@ -74,6 +75,8 @@ func newObsQueue[T request.Request](set Settings[T], delegate Queue[T]) (Queue[T
		or.enqueueFailedInst = tb.ExporterEnqueueFailedMetricPoints
	case pipeline.SignalLogs:
		or.enqueueFailedInst = tb.ExporterEnqueueFailedLogRecords
+	case xpipeline.SignalProfiles:
+		or.enqueueFailedInst = tb.ExporterEnqueueFailedProfileSamples
	}

	or.queueBatchSizeInst = tb.ExporterQueueBatchSendSize
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/persistent_queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/persistent_queue.go
index eb54bef2c4e..4fcb074b6ac 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/persistent_queue.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue/persistent_queue.go
@@ -12,6 +12,7 @@ import (
	"sync"

	"go.uber.org/zap"
+	"google.golang.org/protobuf/proto"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr"
@@ -140,7 +141,6 @@ func (pq *persistentQueue[T]) internalSize() int64 {
}

func (pq *persistentQueue[T]) requestSize() int64 {
-	//nolint:gosec
	return int64(pq.metadata.WriteIndex-pq.metadata.ReadIndex) + int64(len(pq.metadata.CurrentlyDispatchedItems))
}
@@ -179,8 +179,7 @@ func (pq *persistentQueue[T]) loadQueueMetadata(ctx context.Context) error {
		return errValueNotSet
	}

-	metadata := &pq.metadata
-	if err := metadata.Unmarshal(buf); err != nil {
+	if err := proto.Unmarshal(buf, &pq.metadata); err != nil {
		return err
	}
@@ -222,7 +221,7 @@ func (pq *persistentQueue[T]) loadLegacyMetadata(ctx context.Context) {
	pq.retrieveAndEnqueueNotDispatchedReqs(ctx)

	// Save to a new format and clean up legacy keys
-	metadataBytes, err := pq.metadata.Marshal()
+	metadataBytes, err := proto.Marshal(&pq.metadata)
	if err != nil {
		pq.logger.Error("Failed to marshal metadata", zap.Error(err))
		return
@@ -294,7 +293,7 @@ func (pq *persistentQueue[T]) Offer(ctx context.Context, req T) error {
func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error {
	pq.metadata.WriteIndex++

-	metadataBuf, err := pq.metadata.Marshal()
+	metadataBuf, err := proto.Marshal(&pq.metadata)
	if err != nil {
		return err
	}
@@ -363,7 +362,7 @@ func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (uint64, T, conte
	var req T
	restoredCtx := context.Background()

-	metadataBytes, err := pq.metadata.Marshal()
+	metadataBytes, err := proto.Marshal(&pq.metadata)
	if err != nil {
		return 0, req, restoredCtx, false
	}
@@ -516,7 +515,7 @@ func (pq *persistentQueue[T]) itemDispatchingFinish(ctx context.Context, index u
		pq.metadata.ItemsSize = 0
	}

-	metadataBytes, err := pq.metadata.Marshal()
+	metadataBytes, err := proto.Marshal(&pq.metadata)
	if err != nil {
		return err
	}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go
index c49c7265647..804f34a7233 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go
@@ -16,15 +16,16 @@ import (
)

// NewDefaultQueueConfig returns the default config for queuebatch.Config.
-// By default, the queue stores 1000 requests of telemetry and is non-blocking when full.
+// By default:
+//
+// - the queue stores 1000 requests of telemetry
+// - is non-blocking when full
+// - concurrent exports limited to 10
+// - emits batches of 8192 items, timeout 200ms
func NewDefaultQueueConfig() queuebatch.Config {
	return queuebatch.Config{
-		Enabled: true,
-		Sizer: request.SizerTypeRequests,
-		NumConsumers: 10,
-		// By default, batches are 8192 spans, for a total of up to 8 million spans in the queue
-		// This can be estimated at 1-4 GB worth of maximum memory usage
-		// This default is probably still too high, and may be adjusted further down in a future release
+		Sizer: request.SizerTypeRequests,
+		NumConsumers: 10,
		QueueSize: 1_000,
		BlockOnOverflow: false,
		Batch: configoptional.Default(queuebatch.BatchConfig{
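The same defaults are reachable through the public helper. A sketch of tweaking them before handing the result to an exporter, assuming the exported QueueBatchConfig field names mirror the mapstructure tags in the next hunk:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configoptional"
	"go.opentelemetry.io/collector/exporter/exporterhelper"
)

func main() {
	// Defaults per the comment above: 1000 requests, 10 consumers,
	// non-blocking on overflow, 8192-item batches with a 200ms timeout.
	cfg := exporterhelper.NewDefaultQueueConfig()
	cfg.QueueSize = 5_000
	cfg.BlockOnOverflow = true

	// Wrap in Some before passing to WithQueue, which now takes an Optional.
	opt := configoptional.Some(cfg)
	fmt.Println(opt.HasValue(), opt.Get().QueueSize) // true 5000
}
```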
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/config.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/config.go
index ecef3ce3040..78569b85d82 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/config.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/config.go
@@ -16,9 +16,6 @@ import (
// Config defines configuration for queueing and batching incoming requests.
type Config struct {
-	// Enabled indicates whether to not enqueue and batch before exporting.
-	Enabled bool `mapstructure:"enabled"`
-
	// WaitForResult determines if incoming requests are blocked until the request is processed or not.
	// Currently, this option is not available when persistent queue is configured using the storage configuration.
	WaitForResult bool `mapstructure:"wait_for_result"`
@@ -66,10 +63,6 @@ func (cfg *Config) Unmarshal(conf *confmap.Conf) error {
// Validate checks if the Config is valid
func (cfg *Config) Validate() error {
-	if !cfg.Enabled {
-		return nil
-	}
-
	if cfg.NumConsumers <= 0 {
		return errors.New("`num_consumers` must be positive")
	}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/config.schema.yaml b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/config.schema.yaml
new file mode 100644
index 00000000000..116284119bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/config.schema.yaml
@@ -0,0 +1,55 @@
+$defs:
+  batch_config:
+    description: BatchConfig defines a configuration for batching requests based on a timeout and a minimum number of items.
+    type: object
+    properties:
+      flush_timeout:
+        description: FlushTimeout sets the time after which a batch will be sent regardless of its size.
+        type: string
+        x-customType: time.Duration
+        format: duration
+      max_size:
+        description: MaxSize defines the configuration for the maximum size of a batch.
+        type: integer
+        x-customType: int64
+      min_size:
+        description: MinSize defines the configuration for the minimum size of a batch.
+        type: integer
+        x-customType: int64
+      sizer:
+        description: Sizer determines the type of size measurement used by the batch. If not configured, use the same configuration as the queue. It accepts "requests", "items", or "bytes".
+        type: string
+        x-customType: go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.SizerType
+  config:
+    description: Config defines configuration for queueing and batching incoming requests.
+    type: object
+    properties:
+      batch:
+        description: BatchConfig configures how the requests are consumed from the queue and batched together during consumption.
+        x-optional: true
+        $ref: batch_config
+      block_on_overflow:
+        description: BlockOnOverflow determines the behavior when the component's TotalSize limit is reached. If true, the component will wait for space; otherwise, operations will immediately return a retryable error.
+        type: boolean
+      enabled:
+        description: Enabled indicates whether to not enqueue and batch before exporting.
+        type: boolean
+      num_consumers:
+        description: NumConsumers is the maximum number of concurrent consumers from the queue. This applies across all different optional configurations from above (e.g. wait_for_result, block_on_overflow, storage, etc.).
+        type: integer
+      queue_size:
+        description: QueueSize represents the maximum data size allowed for concurrent storage and processing.
+        type: integer
+        x-customType: int64
+      sizer:
+        description: Sizer determines the type of size measurement used by this component. It accepts "requests", "items", or "bytes".
+        type: string
+        x-customType: go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.SizerType
+      storage:
+        description: 'StorageID if not empty, enables the persistent storage and uses the component specified as a storage extension for the persistent queue. TODO: This will be changed to Optional when available. See https://github.com/open-telemetry/opentelemetry-collector/issues/13822'
+        x-pointer: true
+        type: string
+        x-customType: go.opentelemetry.io/collector/component.ID
+      wait_for_result:
+        description: WaitForResult determines if incoming requests are blocked until the request is processed or not. Currently, this option is not available when persistent queue is configured using the storage configuration.
+        type: boolean
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/multi_batcher.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/multi_batcher.go
index 6fb9eeb79e9..3177bff2147 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/multi_batcher.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/multi_batcher.go
@@ -75,11 +75,9 @@ func (mb *multiBatcher) Consume(ctx context.Context, req request.Request, done q
func (mb *multiBatcher) Shutdown(ctx context.Context) error {
	var wg sync.WaitGroup
	mb.shards.Range(func(_, shard any) bool {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		wg.Go(func() {
			_ = shard.(*partitionBatcher).Shutdown(ctx)
-		}()
+		})
		return true
	})
	wg.Wait()
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/partition_batcher.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/partition_batcher.go
index 323ea27f122..d58e0d0830d 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/partition_batcher.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/partition_batcher.go
@@ -136,7 +136,7 @@ func (qb *partitionBatcher) Consume(ctx context.Context, req request.Request, do
		numRefs++
	}
	if numRefs > 1 {
-		done = newRefCountDone(done, int64(len(reqList)))
+		done = newRefCountDone(done, int64(numRefs))
		if mergeSplitErr != nil {
			done.OnDone(mergeSplitErr)
		}
@@ -198,9 +198,7 @@ func (qb *partitionBatcher) Start(context.Context, component.Host) error {
		return nil
	}
	qb.timer = time.NewTimer(qb.cfg.FlushTimeout)
-	qb.stopWG.Add(1)
-	go func() {
-		defer qb.stopWG.Done()
+	qb.stopWG.Go(func() {
		for {
			select {
			case <-qb.shutdownCh:
@@ -209,7 +207,7 @@ func (qb *partitionBatcher) Start(context.Context, component.Host) error {
				qb.flushCurrentBatchIfNecessary()
			}
		}
-	}()
+	})
	return nil
}
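The next hunk drops a gosec suppression from the proto delta sizer. Its DeltaSize models protobuf framing: one tag byte, the payload, and a varint length prefix whose width sov computes as (bits.Len64(x|1)+6)/7, the same formula visible in the removed sovMeta above. A runnable check of the arithmetic, independent of the Collector:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov is the varint width formula used by the sizer below.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func deltaSize(newItemSize int) int {
	return 1 + newItemSize + sov(uint64(newItemSize))
}

func main() {
	fmt.Println(sov(127), sov(128)) // 1 2: 128 needs a second varint byte
	// A 300-byte item costs 1 (tag) + 300 (payload) + 2 (length varint) = 303.
	fmt.Println(deltaSize(300)) // 303
}
```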
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/sizer/proto_delta_sizer.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/sizer/proto_delta_sizer.go
index 1ed9c5a8967..5e8aa940894 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/sizer/proto_delta_sizer.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/sizer/proto_delta_sizer.go
@@ -26,7 +26,7 @@ type protoDeltaSizer struct{}
// - opentelemetry-collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go
// which is generated with gogo/protobuf.
func (s *protoDeltaSizer) DeltaSize(newItemSize int) int {
-	return 1 + newItemSize + sov(uint64(newItemSize)) //nolint:gosec // disable G115
+	return 1 + newItemSize + sov(uint64(newItemSize))
}

func sov(x uint64) int {
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml
index 9212c3ec221..949fc33defc 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml
@@ -15,8 +15,7 @@ telemetry:
  metrics:
    exporter_enqueue_failed_log_records:
      enabled: true
-      stability:
-        level: alpha
+      stability: alpha
      description: Number of log records failed to be added to the sending queue.
      unit: "{records}"
      sum:
@@ -25,18 +24,25 @@
    exporter_enqueue_failed_metric_points:
      enabled: true
-      stability:
-        level: alpha
+      stability: alpha
      description: Number of metric points failed to be added to the sending queue.
      unit: "{datapoints}"
      sum:
        value_type: int
        monotonic: true
+    exporter_enqueue_failed_profile_samples:
+      enabled: true
+      stability: development
+      description: Number of profile samples failed to be added to the sending queue.
+      unit: "{samples}"
+      sum:
+        value_type: int
+        monotonic: true
+
    exporter_enqueue_failed_spans:
      enabled: true
-      stability:
-        level: alpha
+      stability: alpha
      description: Number of spans failed to be added to the sending queue.
      unit: "{spans}"
      sum:
@@ -46,6 +52,7 @@
    exporter_queue_batch_send_size:
      enabled: true
      description: Number of units in the batch
+      stability: development
      unit: "{units}"
      histogram:
        value_type: int
@@ -78,6 +85,7 @@
    exporter_queue_batch_send_size_bytes:
      enabled: true
      description: Number of bytes in batch that was sent. Only available on detailed level.
+      stability: development
      unit: By
      histogram:
        value_type: int
@@ -101,8 +109,7 @@
    exporter_queue_capacity:
      enabled: true
-      stability:
-        level: alpha
+      stability: alpha
      description: Fixed capacity of the retry queue (in batches).
      unit: "{batches}"
      gauge:
@@ -111,8 +118,7 @@
    exporter_queue_size:
      enabled: true
-      stability:
-        level: alpha
+      stability: alpha
      description: Current size of the retry queue (in batches).
      unit: "{batches}"
      gauge:
@@ -121,9 +127,8 @@
    exporter_send_failed_log_records:
      enabled: true
-      stability:
-        level: alpha
-      description: Number of log records in failed attempts to send to destination.
+      stability: alpha
+      description: "Number of log records in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent."
      unit: "{records}"
      sum:
        value_type: int
@@ -131,19 +136,26 @@
    exporter_send_failed_metric_points:
      enabled: true
-      stability:
-        level: alpha
-      description: Number of metric points in failed attempts to send to destination.
+      stability: alpha
+      description: "Number of metric points in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent."
      unit: "{datapoints}"
      sum:
        value_type: int
        monotonic: true
+    exporter_send_failed_profile_samples:
+      enabled: true
+      stability: development
+      description: "Number of profile samples in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent."
+      unit: "{samples}"
+      sum:
+        value_type: int
+        monotonic: true
+
    exporter_send_failed_spans:
      enabled: true
-      stability:
-        level: alpha
-      description: Number of spans in failed attempts to send to destination.
+      stability: alpha
+      description: "Number of spans in failed attempts to send to destination. At detailed telemetry level, includes attributes: error.type (semantic convention), error.permanent."
      unit: "{spans}"
      sum:
        value_type: int
@@ -151,8 +163,7 @@
    exporter_sent_log_records:
      enabled: true
-      stability:
-        level: alpha
+      stability: alpha
      description: Number of log record successfully sent to destination.
      unit: "{records}"
      sum:
@@ -161,18 +172,25 @@
    exporter_sent_metric_points:
      enabled: true
-      stability:
-        level: alpha
+      stability: alpha
      description: Number of metric points successfully sent to destination.
      unit: "{datapoints}"
      sum:
        value_type: int
        monotonic: true
+    exporter_sent_profile_samples:
+      enabled: true
+      stability: development
+      description: Number of profile samples successfully sent to destination.
+      unit: "{samples}"
+      sum:
+        value_type: int
+        monotonic: true
+
    exporter_sent_spans:
      enabled: true
-      stability:
-        level: alpha
+      stability: alpha
      description: Number of spans successfully sent to destination.
      unit: "{spans}"
      sum:
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_batch.go
index 2715cdc3c93..ce4eee560cd 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_batch.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_batch.go
@@ -6,6 +6,7 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporte
import (
	"context"

+	"go.opentelemetry.io/collector/config/configoptional"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch"
@@ -14,7 +15,7 @@ import (
// WithQueue overrides the default QueueBatchConfig for an exporter.
// The default QueueBatchConfig is to disable queueing.
// This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter.
-func WithQueue(config QueueBatchConfig) Option {
+func WithQueue(config configoptional.Optional[QueueBatchConfig]) Option {
	return internal.WithQueue(config)
}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/new_request.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/new_request.go
index 7fe7472eeb5..044bc13634c 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/new_request.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/new_request.go
@@ -6,6 +6,7 @@ package xexporterhelper // import "go.opentelemetry.io/collector/exporter/export
import (
	"context"

+	"go.opentelemetry.io/collector/config/configoptional"
	"go.opentelemetry.io/collector/exporter"
	"go.opentelemetry.io/collector/exporter/exporterhelper"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
@@ -83,6 +84,6 @@ func NewTracesQueueBatchSettings() QueueBatchSettings {
// This option should be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter.
// Experimental: This API is at the early stage of development and may change without backward compatibility
// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
-func WithQueueBatch(cfg exporterhelper.QueueBatchConfig, set QueueBatchSettings) exporterhelper.Option {
+func WithQueueBatch(cfg configoptional.Optional[exporterhelper.QueueBatchConfig], set QueueBatchSettings) exporterhelper.Option {
	return internal.WithQueueBatch(cfg, set)
}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go
index 3ee8d7a5c90..2a837a745eb 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go
@@ -33,18 +33,10 @@ func (req *profilesRequest) MergeSplit(_ context.Context, maxSize int, szt expor
	if !ok {
		return nil, errors.New("invalid input type")
	}
-	// TODO(13106): handle merging of profiles (and change the indice tables with their new indices)
-	// req2.mergeTo(req, sz)
-
-	// If no limit we can simply merge the new request into the current and return.
-	if maxSize == 0 {
-		return []Request{req, req2}, nil
+	err := req2.mergeTo(req, sz)
+	if err != nil {
+		return nil, fmt.Errorf("failed merging profiles; %w", err)
	}
-
-	sp1, err1 := req.split(maxSize, sz)
-	sp2, err2 := req2.split(maxSize, sz)
-
-	return append(sp1, sp2...), errors.Join(err1, err2)
}

// If no limit we can simply merge the new request into the current and return.
@@ -54,14 +46,13 @@ func (req *profilesRequest) MergeSplit(_ context.Context, maxSize int, szt expor
	return req.split(maxSize, sz)
}

-// TODO(13106): handle merging of profiles (and change the indice tables with their new indices)
-/*func (req *profilesRequest) mergeTo(dst *profilesRequest, sz sizer.ProfilesSizer) {
+func (req *profilesRequest) mergeTo(dst *profilesRequest, sz sizer.ProfilesSizer) error {
	if sz != nil {
		dst.setCachedSize(dst.size(sz) + req.size(sz))
		req.setCachedSize(0)
	}
-	req.pd.ResourceProfiles().MoveAndAppendTo(dst.pd.ResourceProfiles())
-}*/
+	return req.pd.MergeTo(dst.pd)
+}

func (req *profilesRequest) split(maxSize int, sz sizer.ProfilesSizer) ([]Request, error) {
	var res []Request
@@ -83,6 +74,8 @@ func extractProfiles(srcProfiles pprofile.Profiles, capacity int, sz sizer.Profi
	destProfiles := pprofile.NewProfiles()
	capacityLeft := capacity - sz.ProfilesSize(destProfiles)
	removedSize := 0
+
+	srcProfiles.Dictionary().CopyTo(destProfiles.Dictionary())
	srcProfiles.ResourceProfiles().RemoveIf(func(srcRP pprofile.ResourceProfiles) bool {
		// If the no more capacity left just return.
		if capacityLeft == 0 {
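mergeTo now delegates to pdata's Profiles.MergeTo, which is meant to reconcile the shared profiles dictionary rather than naively moving resource entries as the commented-out code did. A small sketch of the call shape, assuming the pprofile API exactly as used in the hunk above (the merge semantics are inferred, not verified):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	src := pprofile.NewProfiles()
	src.ResourceProfiles().AppendEmpty()

	dst := pprofile.NewProfiles()

	// MergeTo folds src into dst, keeping dictionary references valid
	// in the destination tables.
	if err := src.MergeTo(dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.ResourceProfiles().Len()) // expected: 1
}
```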
if capacityLeft == 0 { diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/experr/err.go b/vendor/go.opentelemetry.io/collector/exporter/internal/experr/err.go deleted file mode 100644 index f704533639b..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/experr/err.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package experr // import "go.opentelemetry.io/collector/exporter/internal/experr" - -import ( - "fmt" - - "go.opentelemetry.io/collector/component" -) - -func ErrIDMismatch(id component.ID, typ component.Type) error { - return fmt.Errorf("component type mismatch: component ID %q does not have type %q", id, typ) -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md index 8c4f09a247e..8b75b98eedf 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md @@ -1,6 +1,5 @@ -# OTLP gRPC Exporter - +# OTLP gRPC Exporter | Status | | | ------------- |-----------| | Stability | [development]: profiles | @@ -37,7 +36,7 @@ Example: ```yaml exporters: - otlp: + otlp_grpc: endpoint: otelcol2:4317 tls: cert_file: file.cert @@ -52,7 +51,7 @@ By default, `gzip` compression is enabled. See [compression comparison](../../co ```yaml exporters: - otlp: + otlp_grpc: ... compression: none ``` diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go index 7bcd17c0235..d2c3b610f7c 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go @@ -5,51 +5,37 @@ package otlpexporter // import "go.opentelemetry.io/collector/exporter/otlpexpor import ( "errors" - "fmt" - "net" "regexp" - "strconv" "strings" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configoptional" "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/confmap/xconfmap" "go.opentelemetry.io/collector/exporter/exporterhelper" ) // Config defines configuration for OTLP exporter. type Config struct { - TimeoutConfig exporterhelper.TimeoutConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - QueueConfig exporterhelper.QueueBatchConfig `mapstructure:"sending_queue"` - RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` - ClientConfig configgrpc.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + TimeoutConfig exporterhelper.TimeoutConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + QueueConfig configoptional.Optional[exporterhelper.QueueBatchConfig] `mapstructure:"sending_queue"` + RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` + ClientConfig configgrpc.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. 
// prevent unkeyed literal initialization _ struct{} } -func (c *Config) Validate() error { - if after, ok := strings.CutPrefix(c.ClientConfig.Endpoint, "unix://"); ok { - if after == "" { - return errors.New("unix socket path cannot be empty") - } - return nil - } +var ( + _ component.Config = (*Config)(nil) + _ xconfmap.Validator = (*Config)(nil) +) - endpoint := c.sanitizedEndpoint() - if endpoint == "" { +func (c *Config) Validate() error { + if endpoint := c.sanitizedEndpoint(); endpoint == "" { return errors.New(`requires a non-empty "endpoint"`) } - - // Validate that the port is in the address - _, port, err := net.SplitHostPort(endpoint) - if err != nil { - return err - } - if _, err := strconv.Atoi(port); err != nil { - return fmt.Errorf(`invalid port "%s"`, port) - } - return nil } @@ -66,5 +52,3 @@ func (c *Config) sanitizedEndpoint() string { return c.ClientConfig.Endpoint } } - -var _ component.Config = (*Config)(nil) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.yaml new file mode 100644 index 00000000000..4009acb1655 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.yaml @@ -0,0 +1,11 @@ +description: Config defines configuration for OTLP exporter. +type: object +properties: + retry_on_failure: + $ref: go.opentelemetry.io/collector/config/configretry.back_off_config + sending_queue: + x-optional: true + $ref: go.opentelemetry.io/collector/exporter/exporterhelper.queue_batch_config +allOf: + - $ref: go.opentelemetry.io/collector/exporter/exporterhelper.timeout_config + - $ref: go.opentelemetry.io/collector/config/configgrpc.client_config diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go index 3d5cc851214..7a031df8ca6 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go @@ -24,6 +24,7 @@ func NewFactory() exporter.Factory { return xexporter.NewFactory( metadata.Type, createDefaultConfig, + xexporter.WithDeprecatedTypeAlias(component.MustNewType("otlp")), xexporter.WithTraces(createTraces, metadata.TracesStability), xexporter.WithMetrics(createMetrics, metadata.MetricsStability), xexporter.WithLogs(createLogs, metadata.LogsStability), @@ -39,12 +40,11 @@ func createDefaultConfig() component.Config { clientCfg.WriteBufferSize = 512 * 1024 // For backward compatibility: clientCfg.Keepalive = configoptional.None[configgrpc.KeepaliveClientConfig]() - clientCfg.BalancerName = "" return &Config{ TimeoutConfig: exporterhelper.NewDefaultTimeoutConfig(), RetryConfig: configretry.NewDefaultBackOffConfig(), - QueueConfig: exporterhelper.NewDefaultQueueConfig(), + QueueConfig: configoptional.Some(exporterhelper.NewDefaultQueueConfig()), ClientConfig: clientCfg, } } diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go index 02066a673f0..d066fb70f15 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go @@ -7,7 +7,7 @@ import ( ) var ( - Type = component.MustNewType("otlp") + Type = component.MustNewType("otlp_grpc") ScopeName = 
"go.opentelemetry.io/collector/exporter/otlpexporter" ) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml index ec58c59c24d..07fb67848b8 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml @@ -1,4 +1,5 @@ -type: otlp +display_name: OTLP gRPC Exporter +type: otlp_grpc github_project: open-telemetry/opentelemetry-collector status: diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go index 62f1893e5ea..2b0e95fa86b 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go @@ -63,7 +63,7 @@ func newExporter(cfg component.Config, set exporter.Settings) *baseExporter { // is the only place we get hold of Extensions which are required to construct auth round tripper. func (e *baseExporter) start(ctx context.Context, host component.Host) (err error) { agentOpt := configgrpc.WithGrpcDialOption(grpc.WithUserAgent(e.userAgent)) - if e.clientConn, err = e.config.ClientConfig.ToClientConn(ctx, host, e.settings, agentOpt); err != nil { + if e.clientConn, err = e.config.ClientConfig.ToClientConn(ctx, host.GetExtensions(), e.settings, agentOpt); err != nil { return err } e.traceExporter = ptraceotlp.NewGRPCClient(e.clientConn) @@ -71,7 +71,7 @@ func (e *baseExporter) start(ctx context.Context, host component.Host) (err erro e.logExporter = plogotlp.NewGRPCClient(e.clientConn) e.profileExporter = pprofileotlp.NewGRPCClient(e.clientConn) headers := map[string]string{} - for k, v := range e.config.ClientConfig.Headers { + for k, v := range e.config.ClientConfig.Headers.Iter { headers[k] = string(v) } e.metadata = metadata.New(headers) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md index c4e1f92200d..8cb96381d74 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md @@ -1,6 +1,5 @@ -# OTLP/HTTP Exporter - +# OTLP HTTP Exporter | Status | | | ------------- |-----------| | Stability | [development]: profiles | @@ -16,10 +15,13 @@ [otlp]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-otlp -Export traces and/or metrics via HTTP using [OTLP]( +The `otlp_http` exporter sends logs, metrics, profiles and traces via HTTP using [OTLP]( https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md) format. +The `otlphttp` deprecated alias exists for the component name. It will be removed in a future version. +If you use the deprecated alias `otlphttp` in your configuration, change it to `otlp_http`. + The following settings are required: - `endpoint` (no default): The target base URL to send data to (e.g.: https://example.com:4318). @@ -48,7 +50,7 @@ Example: ```yaml exporters: - otlphttp: + otlp_http: endpoint: https://example.com:4318 ``` @@ -56,7 +58,7 @@ By default `gzip` compression is enabled. See [compression comparison](../../con ```yaml exporters: - otlphttp: + otlp_http: ... 
compression: none ``` @@ -65,7 +67,7 @@ By default `proto` encoding is used, to change the content encoding of the messa ```yaml exporters: - otlphttp: + otlp_http: ... encoding: json ``` diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go index 0417fe1dd75..8ce72fa28b4 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go @@ -10,6 +10,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configoptional" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/exporter/exporterhelper" ) @@ -45,9 +46,9 @@ func (e *EncodingType) UnmarshalText(text []byte) error { // Config defines configuration for OTLP/HTTP exporter. type Config struct { - ClientConfig confighttp.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - QueueConfig exporterhelper.QueueBatchConfig `mapstructure:"sending_queue"` - RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` + ClientConfig confighttp.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + QueueConfig configoptional.Optional[exporterhelper.QueueBatchConfig] `mapstructure:"sending_queue"` + RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` // The URL to send traces to. If omitted the Endpoint + "/v1/traces" will be used. TracesEndpoint string `mapstructure:"traces_endpoint"` diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go index 3db1c4d0f56..66f8106e4aa 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configcompression" "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configoptional" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" @@ -27,6 +28,7 @@ func NewFactory() exporter.Factory { return xexporter.NewFactory( metadata.Type, createDefaultConfig, + xexporter.WithDeprecatedTypeAlias(component.MustNewType("otlphttp")), xexporter.WithTraces(createTraces, metadata.TracesStability), xexporter.WithMetrics(createMetrics, metadata.MetricsStability), xexporter.WithLogs(createLogs, metadata.LogsStability), @@ -44,7 +46,7 @@ func createDefaultConfig() component.Config { return &Config{ RetryConfig: configretry.NewDefaultBackOffConfig(), - QueueConfig: exporterhelper.NewDefaultQueueConfig(), + QueueConfig: configoptional.Some(exporterhelper.NewDefaultQueueConfig()), Encoding: EncodingProto, ClientConfig: clientConfig, } diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go index f16f032244b..498f2332d89 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go +++ 
b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go @@ -7,7 +7,7 @@ import ( ) var ( - Type = component.MustNewType("otlphttp") + Type = component.MustNewType("otlp_http") ScopeName = "go.opentelemetry.io/collector/exporter/otlphttpexporter" ) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml index 7fdb6b26881..b8dc6a24be1 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml @@ -1,4 +1,5 @@ -type: otlphttp +display_name: OTLP HTTP Exporter +type: otlp_http github_project: open-telemetry/opentelemetry-collector status: @@ -11,5 +12,7 @@ status: tests: config: - endpoint: "https://1.2.3.4:1234" + # use an endpoint that does not resolve, to ensure + # connection attempts fail quickly in tests + endpoint: "https://testing.invalid:1234" diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go index 26573a1836d..6cb68a02685 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go @@ -82,7 +82,7 @@ func newExporter(cfg component.Config, set exporter.Settings) (*baseExporter, er // start actually creates the HTTP client. The client construction is deferred till this point as this // is the only place we get hold of Extensions which are required to construct auth round tripper. func (e *baseExporter) start(ctx context.Context, host component.Host) error { - client, err := e.config.ClientConfig.ToClient(ctx, host, e.settings) + client, err := e.config.ClientConfig.ToClient(ctx, host.GetExtensions(), e.settings) if err != nil { return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go b/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go index dea6ed6084e..3a5fb4e5dbf 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go @@ -9,7 +9,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/internal/experr" + "go.opentelemetry.io/collector/internal/componentalias" "go.opentelemetry.io/collector/pipeline" ) @@ -34,55 +34,60 @@ type Factory interface { // FactoryOption apply changes to ReceiverOptions. type FactoryOption interface { // applyOption applies the option. - applyOption(o *factoryOpts) + applyOption(o *factory) } // factoryOptionFunc is an ReceiverFactoryOption created through a function. -type factoryOptionFunc func(*factoryOpts) +type factoryOptionFunc func(*factory) -func (f factoryOptionFunc) applyOption(o *factoryOpts) { +func (f factoryOptionFunc) applyOption(o *factory) { f(o) } -type factoryOpts struct { - opts []exporter.FactoryOption - *factory -} - // CreateProfilesFunc is the equivalent of Factory.CreateProfiles. type CreateProfilesFunc func(context.Context, exporter.Settings, component.Config) (Profiles, error) // WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level. 
func WithTraces(createTraces exporter.CreateTracesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, exporter.WithTraces(createTraces, sl)) }) } // WithMetrics overrides the default "error not supported" implementation for CreateMetrics and the default "undefined" stability level. func WithMetrics(createMetrics exporter.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, exporter.WithMetrics(createMetrics, sl)) }) } // WithLogs overrides the default "error not supported" implementation for CreateLogs and the default "undefined" stability level. func WithLogs(createLogs exporter.CreateLogsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, exporter.WithLogs(createLogs, sl)) }) } // WithProfiles overrides the default "error not supported" implementation for CreateProfilesExporter and the default "undefined" stability level. func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.profilesStabilityLevel = sl o.createProfilesFunc = createProfiles }) } +// WithDeprecatedTypeAlias configures a deprecated type alias for the exporter. Only one alias is supported per exporter. +// When the alias is used in configuration, a deprecation warning is automatically logged. +func WithDeprecatedTypeAlias(alias component.Type) FactoryOption { + return factoryOptionFunc(func(o *factory) { + o.SetDeprecatedAlias(alias) + }) +} + type factory struct { exporter.Factory + componentalias.TypeAliasHolder + opts []exporter.FactoryOption createProfilesFunc CreateProfilesFunc profilesStabilityLevel component.StabilityLevel } @@ -95,19 +100,19 @@ func (f *factory) CreateProfiles(ctx context.Context, set exporter.Settings, cfg if f.createProfilesFunc == nil { return nil, pipeline.ErrSignalNotSupported } - - if set.ID.Type() != f.Type() { - return nil, experr.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err } return f.createProfilesFunc(ctx, set, cfg) } -// NewFactory returns a Factory. +// NewFactory creates a wrapped exporter.Factory with experimental capabilities. func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { - opts := factoryOpts{factory: &factory{}} + f := &factory{TypeAliasHolder: componentalias.NewTypeAliasHolder()} for _, opt := range options { - opt.applyOption(&opts) + opt.applyOption(f) } - opts.Factory = exporter.NewFactory(cfgType, createDefaultConfig, opts.opts...) - return opts.factory + f.Factory = exporter.NewFactory(cfgType, createDefaultConfig, f.opts...) 
+ f.Factory.(componentalias.TypeAliasHolder).SetDeprecatedAlias(f.DeprecatedAlias()) + return f } diff --git a/vendor/go.opentelemetry.io/collector/extension/extension.go b/vendor/go.opentelemetry.io/collector/extension/extension.go index b127432bcf9..d90571344e6 100644 --- a/vendor/go.opentelemetry.io/collector/extension/extension.go +++ b/vendor/go.opentelemetry.io/collector/extension/extension.go @@ -5,9 +5,9 @@ package extension // import "go.opentelemetry.io/collector/extension" import ( "context" - "fmt" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/internal/componentalias" ) // Extension is the interface for objects hosted by the OpenTelemetry Collector that @@ -49,6 +49,7 @@ type Factory interface { type factory struct { cfgType component.Type component.CreateDefaultConfigFunc + componentalias.TypeAliasHolder createFunc CreateFunc extensionStability component.StabilityLevel } @@ -64,8 +65,8 @@ func (f *factory) Stability() component.StabilityLevel { } func (f *factory) Create(ctx context.Context, set Settings, cfg component.Config) (Extension, error) { - if set.ID.Type() != f.cfgType { - return nil, fmt.Errorf("component type mismatch: component ID %q does not have type %q", set.ID, f.cfgType) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createFunc(ctx, set, cfg) @@ -81,6 +82,7 @@ func NewFactory( return &factory{ cfgType: cfgType, CreateDefaultConfigFunc: createDefaultConfig, + TypeAliasHolder: componentalias.NewTypeAliasHolder(), createFunc: createServiceExtension, extensionStability: sl, } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE b/vendor/go.opentelemetry.io/collector/internal/componentalias/LICENSE similarity index 88% rename from vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE rename to vendor/go.opentelemetry.io/collector/internal/componentalias/LICENSE index f1aee0f1100..d6456956733 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE +++ b/vendor/go.opentelemetry.io/collector/internal/componentalias/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,33 +200,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - --------------------------------------------------------------------------------- - -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/collector/internal/componentalias/Makefile b/vendor/go.opentelemetry.io/collector/internal/componentalias/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/componentalias/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/internal/componentalias/alias.go b/vendor/go.opentelemetry.io/collector/internal/componentalias/alias.go new file mode 100644 index 00000000000..7008a269cb6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/componentalias/alias.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentalias // import "go.opentelemetry.io/collector/internal/componentalias" + +import ( + "errors" + "fmt" + + "go.opentelemetry.io/collector/component" +) + +type TypeAliasHolder interface { + DeprecatedAlias() component.Type + SetDeprecatedAlias(component.Type) +} + +func NewTypeAliasHolder() TypeAliasHolder { + ta := typeAlias(component.Type{}) + return &ta +} + +type typeAlias component.Type + +// DeprecatedAlias returns the deprecated type alias for this component, if any. +// Returns an empty component.Type if no alias is configured. +func (ta *typeAlias) DeprecatedAlias() component.Type { + return component.Type(*ta) +} + +// SetDeprecatedAlias sets the deprecated type alias. +func (ta *typeAlias) SetDeprecatedAlias(newAlias component.Type) { + *ta = typeAlias(newAlias) +} + +// ValidateComponentType returns an error if the provided factory does not match the provided component ID. +// It checks both the current type and any deprecated alias type.
+func ValidateComponentType(f component.Factory, id component.ID) error { + if id.Type() == f.Type() { + return nil + } + errMsg := fmt.Sprintf("component type mismatch: component ID %q does not have type %q", id, f.Type()) + if aliasHolder, ok := f.(TypeAliasHolder); ok && aliasHolder.DeprecatedAlias().String() != "" { + if id.Type() == aliasHolder.DeprecatedAlias() { + return nil + } + errMsg += fmt.Sprintf(" or deprecated alias type %q", aliasHolder.DeprecatedAlias()) + } + return errors.New(errMsg) +} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/attribute.go similarity index 72% rename from vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go rename to vendor/go.opentelemetry.io/collector/internal/telemetry/attribute.go index 2c18e5d9144..329935f49b4 100644 --- a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/attribute.go @@ -1,11 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" +package telemetry // import "go.opentelemetry.io/collector/internal/telemetry" import ( - "slices" - "go.opentelemetry.io/otel/attribute" "go.uber.org/zap" ) @@ -18,17 +16,10 @@ const ( SignalOutputKey = "otelcol.signal.output" ) -func RemoveAttributes(attrs attribute.Set, fields ...string) attribute.Set { - attrs, _ = attribute.NewSetWithFiltered(attrs.ToSlice(), func(kv attribute.KeyValue) bool { - return !slices.Contains(fields, string(kv.Key)) - }) - return attrs -} - // ToZapFields converts an OTel Go attribute set to a slice of zap fields. -func ToZapFields(attrs attribute.Set) []zap.Field { - zapFields := make([]zap.Field, 0, attrs.Len()) - for _, attr := range attrs.ToSlice() { +func ToZapFields(attrs []attribute.KeyValue) []zap.Field { + zapFields := make([]zap.Field, 0, len(attrs)) + for _, attr := range attrs { var zapField zap.Field key := string(attr.Key) switch attr.Value.Type() { diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go deleted file mode 100644 index 4034e69311b..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" - -import ( - "go.opentelemetry.io/contrib/bridges/otelzap" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/log" - "go.uber.org/multierr" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// Interface for Zap cores that support setting and resetting a set of component attributes. -// -// There are two wrappers that implement this interface: -// -// - [NewConsoleCoreWithAttributes] injects component attributes as Zap fields. -// -// This is used for the Collector's console output. -// -// - [NewOTelTeeCoreWithAttributes] copies logs to a [log.LoggerProvider] using [otelzap]. For the -// copied logs, component attributes are injected as instrumentation scope attributes. -// -// This is used when service::telemetry::logs::processors is configured. 
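For readers skimming the new `componentalias` package above: the validation boils down to accepting either the factory's current type or its configured deprecated alias. Restated as a standalone, runnable illustration (`typeAccepted` is a hypothetical name; the real `ValidateComponentType` lives in an internal package and returns an error rather than a bool):

```go
package main

import "fmt"

// typeAccepted restates the check in componentalias.ValidateComponentType:
// an ID's type matches either the factory type or its deprecated alias.
func typeAccepted(factoryType, deprecatedAlias, idType string) bool {
	if idType == factoryType {
		return true
	}
	return deprecatedAlias != "" && idType == deprecatedAlias
}

func main() {
	fmt.Println(typeAccepted("otlp_grpc", "otlp", "otlp"))      // true: deprecated alias
	fmt.Println(typeAccepted("otlp_grpc", "otlp", "otlp_grpc")) // true: current type
	fmt.Println(typeAccepted("otlp_grpc", "otlp", "jaeger"))    // false: mismatch
}
```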
-type coreWithAttributes interface { - zapcore.Core - withAttributeSet(attribute.Set) zapcore.Core -} - -// Tries setting the component attribute set for a Zap core. -// -// Does nothing if the core does not implement [coreWithAttributes]. -func tryWithAttributeSet(c zapcore.Core, attrs attribute.Set) zapcore.Core { - if cwa, ok := c.(coreWithAttributes); ok { - return cwa.withAttributeSet(attrs) - } - zap.New(c).Debug("Logger core does not support injecting component attributes") - return c -} - -type consoleCoreWithAttributes struct { - zapcore.Core - from zapcore.Core - extraFields []zap.Field -} - -var _ coreWithAttributes = (*consoleCoreWithAttributes)(nil) - -// NewConsoleCoreWithAttributes wraps a Zap core in order to inject component attributes as Zap fields. -// -// This is used for the Collector's console output. -func NewConsoleCoreWithAttributes(c zapcore.Core, attrs attribute.Set, extraFields ...zap.Field) zapcore.Core { - return &consoleCoreWithAttributes{ - Core: c.With(ToZapFields(attrs)).With(extraFields), - from: c, - } -} - -func (ccwa *consoleCoreWithAttributes) With(fields []zapcore.Field) zapcore.Core { - return &consoleCoreWithAttributes{ - Core: ccwa.Core.With(fields), - from: ccwa.from, - extraFields: append(ccwa.extraFields, fields...), - } -} - -func (ccwa *consoleCoreWithAttributes) withAttributeSet(attrs attribute.Set) zapcore.Core { - return NewConsoleCoreWithAttributes(ccwa.from, attrs, ccwa.extraFields...) -} - -type otelTeeCoreWithAttributes struct { - sourceCore zapcore.Core - otelCore zapcore.Core - lp log.LoggerProvider - scopeName string -} - -var _ coreWithAttributes = (*otelTeeCoreWithAttributes)(nil) - -// NewOTelTeeCoreWithAttributes wraps a Zap core in order to copy logs to a [log.LoggerProvider] using [otelzap]. -// For the copied logs, component attributes are injected as instrumentation scope attributes. -// -// Note that we intentionally do not use zapcore.NewTee here, because it will simply duplicate all log entries -// to each core. The provided Zap core may have sampling or a minimum log level applied to it, so in order to -// maintain consistency we need to ensure that only the logs accepted by the provided core are copied to the -// log.LoggerProvider. -func NewOTelTeeCoreWithAttributes(core zapcore.Core, lp log.LoggerProvider, scopeName string, attrs attribute.Set) zapcore.Core { - otelCore := otelzap.NewCore( - scopeName, - otelzap.WithLoggerProvider(lp), - otelzap.WithAttributes(attrs.ToSlice()...), - ) - return &otelTeeCoreWithAttributes{ - sourceCore: core, - otelCore: otelCore, - lp: lp, - scopeName: scopeName, - } -} - -func (ocwa *otelTeeCoreWithAttributes) withAttributeSet(attrs attribute.Set) zapcore.Core { - return NewOTelTeeCoreWithAttributes( - tryWithAttributeSet(ocwa.sourceCore, attrs), - ocwa.lp, ocwa.scopeName, attrs, - ) -} - -func (ocwa *otelTeeCoreWithAttributes) With(fields []zapcore.Field) zapcore.Core { - sourceCoreWith := ocwa.sourceCore.With(fields) - otelCoreWith := ocwa.otelCore.With(fields) - return &otelTeeCoreWithAttributes{ - sourceCore: sourceCoreWith, - otelCore: otelCoreWith, - lp: ocwa.lp, - scopeName: ocwa.scopeName, - } -} - -func (ocwa *otelTeeCoreWithAttributes) Enabled(level zapcore.Level) bool { - return ocwa.sourceCore.Enabled(level) -} - -func (ocwa *otelTeeCoreWithAttributes) Check(entry zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - ce = ocwa.sourceCore.Check(entry, ce) - if ce != nil { - // Only log to the otelzap core if the input core accepted the log entry. 
- ce = ce.AddCore(entry, ocwa.otelCore) - } - return ce -} - -func (ocwa *otelTeeCoreWithAttributes) Write(entry zapcore.Entry, fields []zapcore.Field) error { - err := ocwa.sourceCore.Write(entry, fields) - return multierr.Append(err, ocwa.otelCore.Write(entry, fields)) -} - -func (ocwa *otelTeeCoreWithAttributes) Sync() error { - err := ocwa.sourceCore.Sync() - return multierr.Append(err, ocwa.otelCore.Sync()) -} - -// ZapLoggerWithAttributes creates a Zap Logger with a new set of injected component attributes. -func ZapLoggerWithAttributes(logger *zap.Logger, attrs attribute.Set) *zap.Logger { - return logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core { - return tryWithAttributeSet(c, attrs) - })) -} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go deleted file mode 100644 index 5eac1a8bdf1..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" - -import ( - "slices" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type tracerProviderWithAttributes struct { - trace.TracerProvider - attrs []attribute.KeyValue -} - -// TracerProviderWithAttributes creates a TracerProvider with a new set of injected instrumentation scope attributes. -func TracerProviderWithAttributes(tp trace.TracerProvider, attrs attribute.Set) trace.TracerProvider { - if tpwa, ok := tp.(tracerProviderWithAttributes); ok { - tp = tpwa.TracerProvider - } - return tracerProviderWithAttributes{ - TracerProvider: tp, - attrs: attrs.ToSlice(), - } -} - -func tracerWithAttributes(tp trace.TracerProvider, attrs []attribute.KeyValue, name string, opts ...trace.TracerOption) trace.Tracer { - conf := trace.NewTracerConfig(opts...) - attrSet := conf.InstrumentationAttributes() - // prepend our attributes so they can be overwritten - newAttrs := append(slices.Clone(attrs), attrSet.ToSlice()...) - // append our attribute set option to overwrite the old one - opts = append(opts, trace.WithInstrumentationAttributes(newAttrs...)) - return tp.Tracer(name, opts...) -} - -func (tpwa tracerProviderWithAttributes) Tracer(name string, options ...trace.TracerOption) trace.Tracer { - return tracerWithAttributes(tpwa.TracerProvider, tpwa.attrs, name, options...) 
-} - -func (tpwa tracerProviderWithAttributes) Unwrap() trace.TracerProvider { - return tpwa.TracerProvider -} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go index 7fa0e2f584c..e792b0b1ae0 100644 --- a/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go @@ -4,14 +4,13 @@ package telemetry // import "go.opentelemetry.io/collector/internal/telemetry" import ( - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/featuregate" - "go.opentelemetry.io/collector/internal/telemetry/componentattribute" - "go.opentelemetry.io/collector/pdata/pcommon" ) var NewPipelineTelemetryGate = featuregate.GlobalRegistry().MustRegister( @@ -22,42 +21,30 @@ var NewPipelineTelemetryGate = featuregate.GlobalRegistry().MustRegister( featuregate.WithRegisterDescription("Injects component-identifying scope attributes in internal Collector metrics"), ) -// IMPORTANT: This struct is reexported as part of the public API of -// go.opentelemetry.io/collector/component, a stable module. -// DO NOT MAKE BREAKING CHANGES TO EXPORTED FIELDS. -type TelemetrySettings struct { - // Logger that the factory can use during creation and can pass to the created - // component to be used later as well. - Logger *zap.Logger - - // TracerProvider that the factory can pass to other instrumented third-party libraries. - // - // The service may wrap this provider for attribute injection. The wrapper may implement an - // additional `Unwrap() trace.TracerProvider` method to grant access to the underlying SDK. - TracerProvider trace.TracerProvider - - // MeterProvider that the factory can pass to other instrumented third-party libraries. - MeterProvider metric.MeterProvider - - // Resource contains the resource attributes for the collector's telemetry. 
- Resource pcommon.Resource - - // Extra attributes added to instrumentation scopes - extraAttributes attribute.Set +type injectorCore interface { + DropInjectedAttributes(droppedAttrs ...string) zapcore.Core } -// The publicization of this API is tracked in https://github.com/open-telemetry/opentelemetry-collector/issues/12405 +type injectorTracerProvider interface { + DropInjectedAttributes(droppedAttrs ...string) trace.TracerProvider +} -func WithoutAttributes(ts TelemetrySettings, fields ...string) TelemetrySettings { - return WithAttributeSet(ts, componentattribute.RemoveAttributes(ts.extraAttributes, fields...)) +type injectorMeterProvider interface { + DropInjectedAttributes(droppedAttrs ...string) metric.MeterProvider } -func WithAttributeSet(ts TelemetrySettings, attrs attribute.Set) TelemetrySettings { - ts.extraAttributes = attrs - ts.Logger = componentattribute.ZapLoggerWithAttributes(ts.Logger, ts.extraAttributes) - ts.TracerProvider = componentattribute.TracerProviderWithAttributes(ts.TracerProvider, ts.extraAttributes) - if NewPipelineTelemetryGate.IsEnabled() { - ts.MeterProvider = componentattribute.MeterProviderWithAttributes(ts.MeterProvider, ts.extraAttributes) +func DropInjectedAttributes(ts component.TelemetrySettings, attrs ...string) component.TelemetrySettings { + ts.Logger = ts.Logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core { + if ic, ok := c.(injectorCore); ok { + return ic.DropInjectedAttributes(attrs...) + } + return c + })) + if itp, ok := ts.TracerProvider.(injectorTracerProvider); ok { + ts.TracerProvider = itp.DropInjectedAttributes(attrs...) + } + if imp, ok := ts.MeterProvider.(injectorMeterProvider); ok { + ts.MeterProvider = imp.DropInjectedAttributes(attrs...) } return ts } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/collector.go b/vendor/go.opentelemetry.io/collector/otelcol/collector.go index eff64d6275b..726eaf0eda5 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/collector.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/collector.go @@ -14,7 +14,6 @@ import ( "os/signal" "sync" "sync/atomic" - "syscall" "go.uber.org/multierr" "go.uber.org/zap" @@ -25,7 +24,6 @@ import ( "go.opentelemetry.io/collector/confmap/xconfmap" "go.opentelemetry.io/collector/otelcol/internal/grpclog" "go.opentelemetry.io/collector/service" - "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" ) // State defines Collector's state. @@ -54,7 +52,8 @@ func (s State) String() string { // CollectorSettings holds configuration for creating a new Collector. type CollectorSettings struct { - // Factories service factories. + // Factories returns component factories for the collector. + // // TODO(13263) This is a dangerous "bare" function value, should define an interface // following style guidelines. Factories func() (Factories, error) @@ -99,7 +98,8 @@ type CollectorSettings struct { // Collector represents a server providing the OpenTelemetry Collector service. 
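Reviewer note on the rewritten `internal/telemetry` package above: instead of tracking an `attribute.Set` inside `TelemetrySettings`, attribute removal is now an optional capability discovered by type assertion. A condensed sketch of that pattern for one provider (the logger and meter provider follow the same shape):

```go
package sketch

import (
	"go.opentelemetry.io/otel/trace"

	"go.opentelemetry.io/collector/component"
)

// injectorTracerProvider is the optional capability checked by
// DropInjectedAttributes: providers that can drop injected attributes
// advertise it by implementing this method.
type injectorTracerProvider interface {
	DropInjectedAttributes(droppedAttrs ...string) trace.TracerProvider
}

// dropTracerAttrs applies the assertion pattern; providers without the
// capability pass through unchanged.
func dropTracerAttrs(ts component.TelemetrySettings, attrs ...string) component.TelemetrySettings {
	if itp, ok := ts.TracerProvider.(injectorTracerProvider); ok {
		ts.TracerProvider = itp.DropInjectedAttributes(attrs...)
	}
	return ts
}
```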
type Collector struct { - set CollectorSettings + set CollectorSettings + buildZapLogger func(zap.Config, ...zap.Option) (*zap.Logger, error) configProvider *ConfigProvider @@ -136,9 +136,10 @@ func NewCollector(set CollectorSettings) (*Collector, error) { state := new(atomic.Int64) state.Store(int64(StateStarting)) return &Collector{ - set: set, - state: state, - shutdownChan: make(chan struct{}), + set: set, + buildZapLogger: zap.Config.Build, + state: state, + shutdownChan: make(chan struct{}), // Per signal.Notify documentation, a size of the channel equaled with // the number of signals getting notified on is recommended. signalsChannel: make(chan os.Signal, 3), @@ -178,6 +179,7 @@ func (col *Collector) setupConfigurationComponents(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to initialize factories: %w", err) } + cfg, err := col.configProvider.Get(ctx, factories) if err != nil { return fmt.Errorf("failed to get config: %w", err) @@ -195,6 +197,17 @@ func (col *Collector) setupConfigurationComponents(ctx context.Context) error { return fmt.Errorf("could not marshal configuration: %w", err) } + // Wrap the buildZapLogger to append LoggingOptions from collector settings, + // since service.Settings.LoggingOptions is deprecated. + buildZapLogger := col.buildZapLogger + if len(col.set.LoggingOptions) > 0 { + origBuildZapLogger := buildZapLogger + buildZapLogger = func(zapCfg zap.Config, opts ...zap.Option) (*zap.Logger, error) { + opts = append(opts, col.set.LoggingOptions...) + return origBuildZapLogger(zapCfg, opts...) + } + } + col.service, err = service.New(ctx, service.Settings{ BuildInfo: col.set.BuildInfo, CollectorConf: conf, @@ -218,11 +231,8 @@ func (col *Collector) setupConfigurationComponents(ctx context.Context) error { Connector: buildModuleInfo(factories.ConnectorModules), }, AsyncErrorChannel: col.asyncErrorChannel, - LoggingOptions: col.set.LoggingOptions, - - // TODO: inject the telemetry factory through factories. - // See https://github.com/open-telemetry/opentelemetry-collector/issues/4970 - TelemetryFactory: otelconftelemetry.NewFactory(), + BuildZapLogger: buildZapLogger, + TelemetryFactory: factories.Telemetry, }, cfg.Service) if err != nil { return err @@ -272,6 +282,7 @@ func (col *Collector) DryRun(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to initialize factories: %w", err) } + cfg, err := col.configProvider.Get(ctx, factories) if err != nil { return fmt.Errorf("failed to get config: %w", err) @@ -291,6 +302,7 @@ func (col *Collector) DryRun(ctx context.Context) error { ExportersFactories: factories.Exporters, ConnectorsConfigs: cfg.Connectors, ConnectorsFactories: factories.Connectors, + TelemetryFactory: factories.Telemetry, }, service.Config{ Pipelines: cfg.Service.Pipelines, }) @@ -335,12 +347,12 @@ func (col *Collector) Run(ctx context.Context) error { } // Always notify with SIGHUP for configuration reloading. - signal.Notify(col.signalsChannel, syscall.SIGHUP) + signal.Notify(col.signalsChannel, SIGHUP) defer signal.Stop(col.signalsChannel) // Only notify with SIGTERM and SIGINT if graceful shutdown is enabled. if !col.set.DisableGracefulShutdown { - signal.Notify(col.signalsChannel, os.Interrupt, syscall.SIGTERM) + signal.Notify(col.signalsChannel, os.Interrupt, SIGTERM) } // Control loop: selects between channels for various interrupts - when this loop is broken, the collector exits. 
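Reviewer note on the `collector.go` hunk above: `LoggingOptions` are no longer passed through `service.Settings`; the collector instead wraps its `buildZapLogger` constructor so the options are appended at build time. The decorator shape, extracted as a runnable sketch (`wrapBuild` is a hypothetical helper name):

```go
package main

import "go.uber.org/zap"

// wrapBuild mirrors the wrapper in setupConfigurationComponents: it decorates
// a zap logger constructor so that extra options are always appended.
func wrapBuild(
	build func(zap.Config, ...zap.Option) (*zap.Logger, error),
	extra ...zap.Option,
) func(zap.Config, ...zap.Option) (*zap.Logger, error) {
	if len(extra) == 0 {
		return build // nothing to append; keep the original constructor
	}
	return func(cfg zap.Config, opts ...zap.Option) (*zap.Logger, error) {
		return build(cfg, append(opts, extra...)...)
	}
}

func main() {
	// zap.Config.Build as a method expression matches the constructor shape,
	// just as NewCollector now stores it in buildZapLogger.
	build := wrapBuild(zap.Config.Build, zap.AddCaller())
	logger, err := build(zap.NewProductionConfig())
	if err != nil {
		panic(err)
	}
	logger.Info("constructed with appended options")
}
```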
@@ -361,7 +373,7 @@ LOOP: break LOOP case s := <-col.signalsChannel: col.service.Logger().Info("Received signal from OS", zap.String("signal", s.String())) - if s != syscall.SIGHUP { + if s != SIGHUP { break LOOP } if err := col.reloadConfiguration(ctx); err != nil { diff --git a/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go b/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go index f8fee8d99d6..94952478277 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go @@ -10,6 +10,7 @@ import ( "flag" "fmt" "os" + "slices" "time" "go.uber.org/zap" @@ -18,8 +19,6 @@ import ( "golang.org/x/sys/windows/svc/eventlog" "go.opentelemetry.io/collector/featuregate" - "go.opentelemetry.io/collector/service" - "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" ) type windowsService struct { @@ -93,14 +92,19 @@ func (s *windowsService) start(elog *eventlog.Log, colErrorChannel chan error) e return err } - // The logging options need to be in place before the collector Run method is called - // since the telemetry creates the logger at the time of the Run method call. - // However, the zap.WrapCore function needs to read the serviceConfig to determine - // if the Windows Event Log should be used, however, the serviceConfig is also - // only read at the time of the Run method call. To work around this, we pass the - // serviceConfig as a pointer to the logging options, and then read its value - // when the zap.Logger is created by the telemetry. - s.col.set.LoggingOptions = loggingOptionsWithEventLogCore(elog, &s.col.serviceConfig, s.col.set.LoggingOptions) + // Override the Zap logger to write to the Windows Event Log + // if no file output is specified. + s.col.buildZapLogger = func(cfg zap.Config, opts ...zap.Option) (*zap.Logger, error) { + for _, output := range cfg.OutputPaths { + if output != "stdout" && output != "stderr" { + // A file has been specified in the configuration, + // so do not use the Windows Event Log. + return cfg.Build(opts...) + } + } + opts = slices.Insert(opts, 0, zap.WrapCore(withWindowsCore(elog))) + return cfg.Build(opts...) + } // col.Run blocks until receiving a SIGTERM signal, so needs to be started // asynchronously, but it will exit early if an error occurs on startup @@ -139,20 +143,6 @@ func openEventLog(serviceName string) (*eventlog.Log, error) { return elog, nil } -func loggingOptionsWithEventLogCore( - elog *eventlog.Log, - serviceConfig **service.Config, - userOptions []zap.Option, -) []zap.Option { - return append( - // The order below must be preserved - see PR #11051 - // The event log core must run *after* any user provided options, so it - // must be the first option in this list. 
- []zap.Option{zap.WrapCore(withWindowsCore(elog, serviceConfig))}, - userOptions..., - ) -} - var _ zapcore.Core = (*windowsEventLogCore)(nil) type windowsEventLogCore struct { @@ -212,17 +202,8 @@ func (w windowsEventLogCore) Sync() error { return w.core.Sync() } -func withWindowsCore(elog *eventlog.Log, serviceConfig **service.Config) func(zapcore.Core) zapcore.Core { +func withWindowsCore(elog *eventlog.Log) func(zapcore.Core) zapcore.Core { return func(core zapcore.Core) zapcore.Core { - if serviceConfig != nil && *serviceConfig != nil { - for _, output := range (*serviceConfig).Telemetry.(*otelconftelemetry.Config).Logs.OutputPaths { - if output != "stdout" && output != "stderr" { - // A log file was specified in the configuration, so we should not use the Windows Event Log - return core - } - } - } - // Use the Windows Event Log encoderConfig := zap.NewProductionEncoderConfig() encoderConfig.LineEnding = "\r\n" diff --git a/vendor/go.opentelemetry.io/collector/otelcol/command_components.go b/vendor/go.opentelemetry.io/collector/otelcol/command_components.go index 79961a02ce2..92ffeef40fd 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/command_components.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/command_components.go @@ -8,12 +8,14 @@ import ( "sort" "github.com/spf13/cobra" - yaml "go.yaml.in/yaml/v3" + "go.yaml.in/yaml/v3" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/internal/componentalias" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" ) @@ -117,18 +119,12 @@ func newComponentsCommand(set CollectorSettings) *cobra.Command { } components.BuildInfo = set.BuildInfo - for providerScheme, providerModuleModule := range set.ProviderModules { - components.Providers = append(components.Providers, componentWithoutStability{ - Scheme: providerScheme, - Module: providerModuleModule, - }) - } - - for _, converterModule := range set.ConverterModules { - components.Converters = append(components.Converters, componentWithoutStability{ - Module: converterModule, - }) - } + components.Providers = append(components.Providers, sortProvidersByScheme( + set.ProviderModules, + set.ConfigProviderSettings.ResolverSettings.ProviderFactories, + set.ConfigProviderSettings.ResolverSettings.ProviderSettings, + )...) + components.Converters = append(components.Converters, sortConverterModules(set.ConverterModules)...) 
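Reviewer note on the Windows service change above: the Event Log core is now installed through the `buildZapLogger` override rather than via logging options plus a late read of the service config, and the gate is simply whether the zap config writes anywhere beyond the standard streams. That gate, isolated as a sketch (assuming `OutputPaths` holds sink names such as `"stdout"`):

```go
package sketch

import "go.uber.org/zap"

// usesOnlyStdStreams reports whether a zap config writes exclusively to
// stdout/stderr; the Windows service only adds the Event Log core when true.
func usesOnlyStdStreams(cfg zap.Config) bool {
	for _, output := range cfg.OutputPaths {
		if output != "stdout" && output != "stderr" {
			return false // a file sink is configured, so skip the Event Log
		}
	}
	return true
}
```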
yamlData, err := yaml.Marshal(components) if err != nil { @@ -155,8 +151,53 @@ func sortFactoriesByType[T component.Factory](factories map[component.Type]T) [] // Build and return list of factories, sorted by component types sortedFactories := make([]T, 0, len(factories)) for _, componentType := range componentTypes { - sortedFactories = append(sortedFactories, factories[componentType]) + if !isComponentAlias(factories[componentType]) { + sortedFactories = append(sortedFactories, factories[componentType]) + } } return sortedFactories } + +func sortProvidersByScheme(providerModules map[string]string, provFactories []confmap.ProviderFactory, set confmap.ProviderSettings) []componentWithoutStability { + schemes := make([]string, 0, len(provFactories)) + for _, f := range provFactories { + provF := f.Create(set) + scheme := provF.Scheme() + if !isComponentAlias(f) { + schemes = append(schemes, scheme) + } + } + + sort.Strings(schemes) + + providerComponents := make([]componentWithoutStability, 0, len(providerModules)) + for _, scheme := range schemes { + providerComponents = append(providerComponents, componentWithoutStability{ + Scheme: scheme, + Module: providerModules[scheme], + }) + } + return providerComponents +} + +func sortConverterModules(modules []string) []componentWithoutStability { + sortedModulesCopy := make([]string, len(modules)) + copy(sortedModulesCopy, modules) + sort.Strings(sortedModulesCopy) + + sortedModules := make([]componentWithoutStability, 0, len(modules)) + for _, mod := range sortedModulesCopy { + sortedModules = append(sortedModules, componentWithoutStability{ + Module: mod, + }) + } + return sortedModules +} + +func isComponentAlias(component any) bool { + if al, ok := component.(componentalias.TypeAliasHolder); ok { + return al.DeprecatedAlias().String() != "" + } + return false +} diff --git a/vendor/go.opentelemetry.io/collector/otelcol/command_print.go b/vendor/go.opentelemetry.io/collector/otelcol/command_print.go index ff78d18c162..5c8bcc739f5 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/command_print.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/command_print.go @@ -12,7 +12,7 @@ import ( "strings" "github.com/spf13/cobra" - yaml "go.yaml.in/yaml/v3" + "go.yaml.in/yaml/v3" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/confmap/xconfmap" @@ -23,7 +23,7 @@ const featureGateName = "otelcol.printInitialConfig" var printCommandFeatureFlag = featuregate.GlobalRegistry().MustRegister( featureGateName, - featuregate.StageAlpha, + featuregate.StageBeta, featuregate.WithRegisterFromVersion("v0.120.0"), featuregate.WithRegisterDescription("if set to true, enable the print-config command"), ) diff --git a/vendor/go.opentelemetry.io/collector/otelcol/factories.go b/vendor/go.opentelemetry.io/collector/otelcol/factories.go index c080dbde584..1ef4b0498c1 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/factories.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/factories.go @@ -10,8 +10,10 @@ import ( "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/internal/componentalias" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/service/telemetry" ) // Factories struct holds in a single type all component factories that @@ -32,6 +34,9 @@ type Factories struct { // Connectors maps connector type names in the config to the respective factory. 
Connectors map[component.Type]connector.Factory + // Telemetry is the factory to create the telemetry providers for the service. + Telemetry telemetry.Factory + // ReceiverModules maps receiver types to their respective go modules. ReceiverModules map[component.Type]string @@ -50,6 +55,7 @@ type Factories struct { // MakeFactoryMap takes a list of factories and returns a map with Factory type as keys. // It returns a non-nil error when there are factories with duplicate type. +// If a factory has a deprecated alias, the map will also contain an entry for the alias. func MakeFactoryMap[T component.Factory](factories ...T) (map[component.Type]T, error) { fMap := map[component.Type]T{} for _, f := range factories { @@ -57,6 +63,17 @@ func MakeFactoryMap[T component.Factory](factories ...T) (map[component.Type]T, return fMap, fmt.Errorf("duplicate component factory %q", f.Type()) } fMap[f.Type()] = f + + // If factory has a deprecated alias, add it to the map as well + if aliasHolder, ok := any(f).(componentalias.TypeAliasHolder); ok { + alias := aliasHolder.DeprecatedAlias() + if alias.String() != "" { + if _, exists := fMap[alias]; exists { + return fMap, fmt.Errorf("duplicate component factory %q (alias of %q)", alias, f.Type()) + } + fMap[alias] = f + } + } } return fMap, nil } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/flags.go b/vendor/go.opentelemetry.io/collector/otelcol/flags.go index 06f9989adb3..9a5e243cac4 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/flags.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/flags.go @@ -40,12 +40,12 @@ func flags(reg *featuregate.Registry) *flag.FlagSet { "Set arbitrary component config property. The component has to be defined in the config file and the flag"+ " has a higher precedence. Array config properties are overridden and maps are joined. Example --set=processors.batch.timeout=2s", func(s string) error { - idx := strings.Index(s, "=") - if idx == -1 { + before, after, ok := strings.Cut(s, "=") + if !ok { // No need for more context, see TestSetFlag/invalid_set. 
return errors.New("missing equal sign") } - cfgs.sets = append(cfgs.sets, "yaml:"+strings.TrimSpace(strings.ReplaceAll(s[:idx], ".", "::"))+": "+strings.TrimSpace(s[idx+1:])) + cfgs.sets = append(cfgs.sets, "yaml:"+strings.TrimSpace(strings.ReplaceAll(before, ".", "::"))+": "+strings.TrimSpace(after)) return nil }) diff --git a/vendor/go.opentelemetry.io/collector/otelcol/signals_others.go b/vendor/go.opentelemetry.io/collector/otelcol/signals_others.go new file mode 100644 index 00000000000..64747d319b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/otelcol/signals_others.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !(js && wasm) + +package otelcol // import "go.opentelemetry.io/collector/otelcol" + +import ( + "syscall" +) + +const ( + SIGHUP = syscall.SIGHUP + SIGTERM = syscall.SIGTERM +) diff --git a/vendor/go.opentelemetry.io/collector/otelcol/signals_wasm.go b/vendor/go.opentelemetry.io/collector/otelcol/signals_wasm.go new file mode 100644 index 00000000000..2e738f65733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/otelcol/signals_wasm.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build js && wasm + +package otelcol // import "go.opentelemetry.io/collector/otelcol" + +import ( + "syscall" +) + +const ( + SIGHUP = syscall.Signal(-1) + SIGTERM = syscall.Signal(-1) +) diff --git a/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go b/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go index 0874e48f417..66ea7f8b09d 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go @@ -4,6 +4,8 @@ package otelcol // import "go.opentelemetry.io/collector/otelcol" import ( + "errors" + "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" @@ -12,9 +14,10 @@ import ( "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/service" - "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" ) +var errNilTelemetryFactory = errors.New("otelcol.Factories.Telemetry must not be nil. For example, you can use otelconftelemetry.NewFactory to build a telemetry factory") + type configSettings struct { Receivers *configunmarshaler.Configs[receiver.Factory] `mapstructure:"receivers"` Processors *configunmarshaler.Configs[processor.Factory] `mapstructure:"processors"` @@ -27,10 +30,9 @@ type configSettings struct { // unmarshal the configSettings from a confmap.Conf. // After the config is unmarshalled, `Validate()` must be called to validate. func unmarshal(v *confmap.Conf, factories Factories) (*configSettings, error) { - // TODO: inject the telemetry factory through factories, once available. - // See https://github.com/open-telemetry/opentelemetry-collector/issues/4970 - telFactory := otelconftelemetry.NewFactory() - defaultTelConfig := telFactory.CreateDefaultConfig().(*otelconftelemetry.Config) + if factories.Telemetry == nil { + return nil, errNilTelemetryFactory + } // Unmarshal top level sections and validate. cfg := &configSettings{ @@ -41,7 +43,7 @@ func unmarshal(v *confmap.Conf, factories Factories) (*configSettings, error) { Extensions: configunmarshaler.NewConfigs(factories.Extensions), // TODO: Add a component.ServiceFactory to allow this to be defined by the Service. 
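Reviewer note on the `unmarshaler.go` hunk above: the telemetry factory is no longer hard-coded to `otelconftelemetry`; it must now arrive through `otelcol.Factories.Telemetry`, and a nil factory is rejected with `errNilTelemetryFactory`. A minimal wiring sketch, assuming the `otelconftelemetry` package that the error message itself suggests:

```go
package sketch

import (
	"go.opentelemetry.io/collector/otelcol"
	"go.opentelemetry.io/collector/service/telemetry/otelconftelemetry"
)

// newFactories shows the newly required field; the receiver, processor,
// exporter, extension, and connector maps are elided for brevity.
func newFactories() (otelcol.Factories, error) {
	return otelcol.Factories{
		Telemetry: otelconftelemetry.NewFactory(),
	}, nil
}
```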
Service: service.Config{ - Telemetry: defaultTelConfig, + Telemetry: factories.Telemetry.CreateDefaultConfig(), }, } err := v.Unmarshal(&cfg) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go similarity index 89% rename from vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go index 239adaf6c0e..dbf368d3d11 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package data // import "go.opentelemetry.io/collector/pdata/internal/data" +package internal // import "go.opentelemetry.io/collector/pdata/internal" import ( "encoding/hex" diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go deleted file mode 100644 index 72b5f5752e6..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package data // import "go.opentelemetry.io/collector/pdata/internal/data" - -import ( - "encoding/hex" - "errors" - - "github.com/gogo/protobuf/proto" - - "go.opentelemetry.io/collector/pdata/internal/json" -) - -const profileIDSize = 16 - -var ( - errMarshalProfileID = errors.New("marshal: invalid buffer length for ProfileID") - errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length") -) - -// ProfileID is a custom data type that is used for all profile_id fields in OTLP -// Protobuf messages. -type ProfileID [profileIDSize]byte - -var _ proto.Sizer = (*ProfileID)(nil) - -// Size returns the size of the data to serialize. -func (tid ProfileID) Size() int { - if tid.IsEmpty() { - return 0 - } - return profileIDSize -} - -// IsEmpty returns true if id contains at leas one non-zero byte. -func (tid ProfileID) IsEmpty() bool { - return tid == [profileIDSize]byte{} -} - -// MarshalTo converts profile ID into a binary representation. Called by Protobuf serialization. -func (tid ProfileID) MarshalTo(data []byte) (n int, err error) { - if tid.IsEmpty() { - return 0, nil - } - - if len(data) < profileIDSize { - return 0, errMarshalProfileID - } - - return copy(data, tid[:]), nil -} - -// Unmarshal inflates this profile ID from binary representation. Called by Protobuf serialization. -func (tid *ProfileID) Unmarshal(data []byte) error { - if len(data) == 0 { - *tid = [profileIDSize]byte{} - return nil - } - - if len(data) != profileIDSize { - return errUnmarshalProfileID - } - - copy(tid[:], data) - return nil -} - -// MarshalJSONStream converts ProfileID into a hex string. -func (tid ProfileID) MarshalJSONStream(dest *json.Stream) { - dest.WriteString(hex.EncodeToString(tid[:])) -} - -// UnmarshalJSONIter decodes ProfileID from hex string. 
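Note: since unmarshal now returns errNilTelemetryFactory when Factories.Telemetry is unset, callers that build their own Factories value must populate the new field; a minimal sketch using the otelconftelemetry factory named in the error message above:

    package example

    import (
        "go.opentelemetry.io/collector/otelcol"
        "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry"
    )

    func newFactories() otelcol.Factories {
        return otelcol.Factories{
            // Receivers, Processors, Exporters, Extensions, Connectors omitted.
            Telemetry: otelconftelemetry.NewFactory(),
        }
    }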
-func (tid *ProfileID) UnmarshalJSONIter(iter *json.Iterator) { - *tid = [profileIDSize]byte{} - unmarshalJSON(tid[:], iter) -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go deleted file mode 100644 index 53b69b07267..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go +++ /dev/null @@ -1,840 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/logs/v1/logs_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportLogsServiceRequest struct { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` -} - -func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} } -func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportLogsServiceRequest) ProtoMessage() {} -func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8e3bf87aaa43acd4, []int{0} -} -func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src) -} -func (m *ExportLogsServiceRequest) XXX_Size() int { - return m.Size() -} -func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo - -func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs { - if m != nil { - return m.ResourceLogs - } - return nil -} - -type ExportLogsServiceResponse struct { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. 
when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - PartialSuccess ExportLogsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"` -} - -func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} } -func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportLogsServiceResponse) ProtoMessage() {} -func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8e3bf87aaa43acd4, []int{1} -} -func (m *ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src) -} -func (m *ExportLogsServiceResponse) XXX_Size() int { - return m.Size() -} -func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo - -func (m *ExportLogsServiceResponse) GetPartialSuccess() ExportLogsPartialSuccess { - if m != nil { - return m.PartialSuccess - } - return ExportLogsPartialSuccess{} -} - -type ExportLogsPartialSuccess struct { - // The number of rejected log records. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - RejectedLogRecords int64 `protobuf:"varint,1,opt,name=rejected_log_records,json=rejectedLogRecords,proto3" json:"rejected_log_records,omitempty"` - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. 
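Note: the partial_success contract documented above (and repeated for the metrics and profiles services later in this diff) reduces to a three-way check on the sender side; a hedged sketch, assuming resp is the *ExportLogsServiceResponse defined here and log is the standard library logger:

    ps := resp.GetPartialSuccess()
    switch {
    case ps.GetRejectedLogRecords() > 0:
        // True partial failure: the server dropped some records.
        log.Printf("server rejected %d log records: %s", ps.GetRejectedLogRecords(), ps.GetErrorMessage())
    case ps.GetErrorMessage() != "":
        // rejected == 0 with a message is a warning on an otherwise full success.
        log.Printf("export warning: %s", ps.GetErrorMessage())
    default:
        // Empty partial_success: equivalent to full success.
    }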
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (m *ExportLogsPartialSuccess) Reset() { *m = ExportLogsPartialSuccess{} } -func (m *ExportLogsPartialSuccess) String() string { return proto.CompactTextString(m) } -func (*ExportLogsPartialSuccess) ProtoMessage() {} -func (*ExportLogsPartialSuccess) Descriptor() ([]byte, []int) { - return fileDescriptor_8e3bf87aaa43acd4, []int{2} -} -func (m *ExportLogsPartialSuccess) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportLogsPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportLogsPartialSuccess.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportLogsPartialSuccess) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportLogsPartialSuccess.Merge(m, src) -} -func (m *ExportLogsPartialSuccess) XXX_Size() int { - return m.Size() -} -func (m *ExportLogsPartialSuccess) XXX_DiscardUnknown() { - xxx_messageInfo_ExportLogsPartialSuccess.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportLogsPartialSuccess proto.InternalMessageInfo - -func (m *ExportLogsPartialSuccess) GetRejectedLogRecords() int64 { - if m != nil { - return m.RejectedLogRecords - } - return 0 -} - -func (m *ExportLogsPartialSuccess) GetErrorMessage() string { - if m != nil { - return m.ErrorMessage - } - return "" -} - -func init() { - proto.RegisterType((*ExportLogsServiceRequest)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest") - proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse") - proto.RegisterType((*ExportLogsPartialSuccess)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4) -} - -var fileDescriptor_8e3bf87aaa43acd4 = []byte{ - // 430 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xc1, 0x6e, 0x13, 0x31, - 0x10, 0x86, 0xd7, 0x2d, 0xaa, 0x84, 0xd3, 0x02, 0xb2, 0x7a, 0x08, 0x39, 0x2c, 0x55, 0x50, 0x51, - 0xb8, 0x78, 0x49, 0xb8, 0x70, 0x03, 0x05, 0x71, 0x0b, 0x10, 0x6d, 0x11, 0x07, 0x2e, 0xab, 0xc5, - 0x19, 0x59, 0x5b, 0x6d, 0x77, 0xdc, 0xb1, 0x13, 0xc1, 0x33, 0x20, 0x24, 0x5e, 0x80, 0x17, 0xe0, - 0x49, 0x7a, 0xe0, 0xd0, 0x23, 0x27, 0x84, 0x92, 0x17, 0x41, 0x5e, 0x97, 0xb0, 0x0b, 0x39, 0x04, - 0x4e, 0xbb, 0x1e, 0xcf, 0xff, 0xfd, 0xff, 0xd8, 0x32, 0x7f, 0x84, 0x06, 0x2a, 0x07, 0x25, 0x9c, - 0x81, 0xa3, 0xf7, 0x89, 0x21, 0x74, 0x98, 0x28, 0x2c, 0x4b, 0x50, 0x0e, 0x29, 0x29, 0x51, 0xdb, - 0x64, 0x31, 0xac, 0xbf, 0x99, 0x05, 0x5a, 0x14, 0x0a, 0x64, 0xdd, 0x24, 0x8e, 0x5b, 0xca, 0x50, - 0x94, 0x6b, 0xa5, 0xf4, 0x0a, 0xb9, 0x18, 0xf6, 0x0e, 0x35, 0x6a, 0x0c, 0x58, 0xff, 0x17, 0xfa, - 0x7a, 0xf7, 0x36, 0xd9, 0x36, 0xcd, 0x42, 0x5f, 0xff, 0x94, 0x77, 0x9f, 0xbd, 0x33, 0x48, 0x6e, - 0x82, 0xda, 0x9e, 0x04, 0xff, 0x14, 0xce, 0xe7, 0x60, 0x9d, 0x78, 0xc1, 0x0f, 0x08, 0x2c, 0xce, - 0x49, 0x41, 0xe6, 0x25, 0x5d, 0x76, 0xb4, 0x3b, 0xe8, 0x8c, 0xee, 0xcb, 0x4d, 0xc1, 0xae, 0xe2, - 0xc8, 0xf4, 0x4a, 0xe1, 0x79, 0xe9, 0x3e, 0x35, 0x56, 0xfd, 0x0f, 0x8c, 0xdf, 0xde, 0x60, 0x66, - 0x0d, 0x56, 0x16, 0x44, 0xc5, 0x6f, 0x9a, 0x9c, 0x5c, 0x91, 0x97, 0x99, 0x9d, 0x2b, 0x05, 0xd6, - 0xfb, 0xb1, 
0x41, 0x67, 0xf4, 0x58, 0x6e, 0x75, 0x10, 0xf2, 0x37, 0x7a, 0x1a, 0x38, 0x27, 0x01, - 0x33, 0xbe, 0x76, 0xf1, 0xfd, 0x4e, 0x94, 0xde, 0x30, 0xad, 0x6a, 0xff, 0xbc, 0x39, 0x79, 0x5b, - 0x21, 0x1e, 0xf0, 0x43, 0x82, 0x53, 0x50, 0x0e, 0x66, 0x7e, 0xf2, 0x8c, 0x40, 0x21, 0xcd, 0x42, - 0xa0, 0xdd, 0x54, 0xfc, 0xda, 0x9b, 0xa0, 0x4e, 0xc3, 0x8e, 0xb8, 0xcb, 0x0f, 0x80, 0x08, 0x29, - 0x3b, 0x03, 0x6b, 0x73, 0x0d, 0xdd, 0x9d, 0x23, 0x36, 0xb8, 0x9e, 0xee, 0xd7, 0xc5, 0xe7, 0xa1, - 0x36, 0xfa, 0xcc, 0x78, 0xa7, 0x31, 0xba, 0xf8, 0xc8, 0xf8, 0x5e, 0xc8, 0x20, 0xfe, 0x7d, 0xc8, - 0xf6, 0x65, 0xf5, 0x9e, 0xfc, 0x3f, 0x20, 0x5c, 0x40, 0x3f, 0x1a, 0x7f, 0x65, 0x17, 0xcb, 0x98, - 0x5d, 0x2e, 0x63, 0xf6, 0x63, 0x19, 0xb3, 0x4f, 0xab, 0x38, 0xba, 0x5c, 0xc5, 0xd1, 0xb7, 0x55, - 0x1c, 0xf1, 0x41, 0x81, 0xdb, 0x19, 0x8c, 0x6f, 0x35, 0xd8, 0x53, 0xdf, 0x33, 0x65, 0x6f, 0x26, - 0xfa, 0x4f, 0x75, 0xd1, 0x7c, 0x04, 0x66, 0x96, 0xbb, 0x3c, 0x29, 0x2a, 0x07, 0x54, 0xe5, 0x65, - 0x52, 0xaf, 0x6a, 0xbc, 0x86, 0xea, 0xef, 0xb7, 0xf2, 0x65, 0xe7, 0xf8, 0xa5, 0x81, 0xea, 0xd5, - 0x9a, 0x55, 0xbb, 0xc8, 0xa7, 0xeb, 0x24, 0x3e, 0x80, 0x7c, 0x3d, 0x7c, 0xbb, 0x57, 0x33, 0x1e, - 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xaf, 0x6c, 0x7d, 0x83, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LogsServiceClient is the client API for LogsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LogsServiceClient interface { - Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) -} - -type logsServiceClient struct { - cc *grpc.ClientConn -} - -func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient { - return &logsServiceClient{cc} -} - -func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) { - out := new(ExportLogsServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// LogsServiceServer is the server API for LogsService service. -type LogsServiceServer interface { - Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) -} - -// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations. 
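Note: the client stub deleted here follows the standard generated-gRPC pattern; for reference, invoking it required only a connection (the endpoint and the empty request are illustrative):

    conn, err := grpc.Dial("collector:4317", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        return err
    }
    defer conn.Close()
    client := NewLogsServiceClient(conn)
    resp, err := client.Export(context.Background(), &ExportLogsServiceRequest{ /* ResourceLogs: ... */ })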
-type UnimplementedLogsServiceServer struct { -} - -func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) { - s.RegisterService(&_LogsService_serviceDesc, srv) -} - -func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportLogsServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LogsServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _LogsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService", - HandlerType: (*LogsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _LogsService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto", -} - -func (m *ExportLogsServiceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportLogsServiceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportLogsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceLogs) > 0 { - for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogsService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExportLogsServiceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportLogsServiceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportLogsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogsService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ExportLogsPartialSuccess) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportLogsPartialSuccess) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportLogsPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - 
if len(m.ErrorMessage) > 0 { - i -= len(m.ErrorMessage) - copy(dAtA[i:], m.ErrorMessage) - i = encodeVarintLogsService(dAtA, i, uint64(len(m.ErrorMessage))) - i-- - dAtA[i] = 0x12 - } - if m.RejectedLogRecords != 0 { - i = encodeVarintLogsService(dAtA, i, uint64(m.RejectedLogRecords)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintLogsService(dAtA []byte, offset int, v uint64) int { - offset -= sovLogsService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ExportLogsServiceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceLogs) > 0 { - for _, e := range m.ResourceLogs { - l = e.Size() - n += 1 + l + sovLogsService(uint64(l)) - } - } - return n -} - -func (m *ExportLogsServiceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.PartialSuccess.Size() - n += 1 + l + sovLogsService(uint64(l)) - return n -} - -func (m *ExportLogsPartialSuccess) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RejectedLogRecords != 0 { - n += 1 + sovLogsService(uint64(m.RejectedLogRecords)) - } - l = len(m.ErrorMessage) - if l > 0 { - n += 1 + l + sovLogsService(uint64(l)) - } - return n -} - -func sovLogsService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLogsService(x uint64) (n int) { - return sovLogsService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ExportLogsServiceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportLogsServiceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportLogsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogsService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogsService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceLogs = append(m.ResourceLogs, &v1.ResourceLogs{}) - if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportLogsServiceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportLogsServiceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportLogsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogsService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogsService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportLogsPartialSuccess) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportLogsPartialSuccess: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportLogsPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType) - } - m.RejectedLogRecords = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RejectedLogRecords |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogsService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLogsService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorMessage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) 
|| (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLogsService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLogsService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLogsService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLogsService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLogsService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLogsService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLogsService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go deleted file mode 100644 index bfdc29395a3..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go +++ /dev/null @@ -1,840 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportMetricsServiceRequest struct { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain one - // element. 
Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` -} - -func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } -func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceRequest) ProtoMessage() {} -func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_75fb6015e6e64798, []int{0} -} -func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) -} -func (m *ExportMetricsServiceRequest) XXX_Size() int { - return m.Size() -} -func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo - -func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics { - if m != nil { - return m.ResourceMetrics - } - return nil -} - -type ExportMetricsServiceResponse struct { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. 
- PartialSuccess ExportMetricsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"` -} - -func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } -func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceResponse) ProtoMessage() {} -func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_75fb6015e6e64798, []int{1} -} -func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) -} -func (m *ExportMetricsServiceResponse) XXX_Size() int { - return m.Size() -} -func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo - -func (m *ExportMetricsServiceResponse) GetPartialSuccess() ExportMetricsPartialSuccess { - if m != nil { - return m.PartialSuccess - } - return ExportMetricsPartialSuccess{} -} - -type ExportMetricsPartialSuccess struct { - // The number of rejected data points. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - RejectedDataPoints int64 `protobuf:"varint,1,opt,name=rejected_data_points,json=rejectedDataPoints,proto3" json:"rejected_data_points,omitempty"` - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. 
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (m *ExportMetricsPartialSuccess) Reset() { *m = ExportMetricsPartialSuccess{} } -func (m *ExportMetricsPartialSuccess) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsPartialSuccess) ProtoMessage() {} -func (*ExportMetricsPartialSuccess) Descriptor() ([]byte, []int) { - return fileDescriptor_75fb6015e6e64798, []int{2} -} -func (m *ExportMetricsPartialSuccess) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportMetricsPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportMetricsPartialSuccess.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportMetricsPartialSuccess) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsPartialSuccess.Merge(m, src) -} -func (m *ExportMetricsPartialSuccess) XXX_Size() int { - return m.Size() -} -func (m *ExportMetricsPartialSuccess) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsPartialSuccess.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsPartialSuccess proto.InternalMessageInfo - -func (m *ExportMetricsPartialSuccess) GetRejectedDataPoints() int64 { - if m != nil { - return m.RejectedDataPoints - } - return 0 -} - -func (m *ExportMetricsPartialSuccess) GetErrorMessage() string { - if m != nil { - return m.ErrorMessage - } - return "" -} - -func init() { - proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest") - proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse") - proto.RegisterType((*ExportMetricsPartialSuccess)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798) -} - -var fileDescriptor_75fb6015e6e64798 = []byte{ - // 427 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xbf, 0x8e, 0xd3, 0x30, - 0x18, 0x8f, 0xef, 0xd0, 0x49, 0xf8, 0xe0, 0x0e, 0x99, 0x1b, 0x4e, 0x05, 0x85, 0x53, 0x58, 0x22, - 0x81, 0x1c, 0x5a, 0x76, 0x86, 0xc2, 0xb1, 0x9d, 0x1a, 0xa5, 0x88, 0xa1, 0x4b, 0x64, 0xdc, 0x4f, - 0x51, 0x50, 0x1a, 0x1b, 0xdb, 0xad, 0xe8, 0x5b, 0x30, 0xb0, 0xf0, 0x0a, 0x88, 0x07, 0xe9, 0xd8, - 0xb1, 0x13, 0x42, 0xed, 0x8b, 0xa0, 0xc4, 0x69, 0xc1, 0x25, 0x43, 0xc5, 0x6d, 0xce, 0xcf, 0xdf, - 0xef, 0x4f, 0x7e, 0xd6, 0x87, 0x5f, 0x09, 0x09, 0xa5, 0x81, 0x02, 0x26, 0x60, 0xd4, 0x3c, 0x92, - 0x4a, 0x18, 0x11, 0x71, 0x51, 0x14, 0xc0, 0x8d, 0x50, 0x51, 0x85, 0xe6, 0x5c, 0x47, 0xb3, 0xee, - 0xf6, 0x98, 0x6a, 0x50, 0xb3, 0x9c, 0x03, 0xad, 0x47, 0x49, 0xe8, 0xf0, 0x2d, 0x48, 0x77, 0x7c, - 0xda, 0x90, 0xe8, 0xac, 0xdb, 0xb9, 0xc8, 0x44, 0x26, 0xac, 0x7e, 0x75, 0xb2, 0xa3, 0x9d, 0xe7, - 0x6d, 0xfe, 0xff, 0xba, 0xda, 0xe9, 0x60, 0x8e, 0x1f, 0x5d, 0x7f, 0x96, 0x42, 0x99, 0x1b, 0x0b, - 0x0f, 0x6d, 0x96, 0x04, 0x3e, 0x4d, 0x41, 0x1b, 0x32, 0xc2, 0x0f, 0x14, 0x68, 0x31, 0x55, 0x1c, - 0xd2, 0x86, 0x78, 0x89, 0xae, 0x8e, 0xc3, 0xd3, 0x5e, 0x44, 0xdb, 0x72, 0xfe, 0x49, 0x47, 0x93, - 0x86, 0xd7, 0x08, 0x27, 0xe7, 0xca, 0x05, 0x82, 0xaf, 0x08, 0x3f, 0x6e, 0xf7, 0xd6, 0x52, 0x94, - 0x1a, 0x88, 0xc1, 0xe7, 0x92, 
0x29, 0x93, 0xb3, 0x22, 0xd5, 0x53, 0xce, 0x41, 0x57, 0xde, 0x28, - 0x3c, 0xed, 0x5d, 0xd3, 0x43, 0x3b, 0xa2, 0x8e, 0x41, 0x6c, 0xd5, 0x86, 0x56, 0xac, 0x7f, 0x67, - 0xf1, 0xf3, 0x89, 0x97, 0x9c, 0x49, 0x07, 0x0d, 0xcc, 0x5e, 0x23, 0x2e, 0x89, 0xbc, 0xc0, 0x17, - 0x0a, 0x3e, 0x02, 0x37, 0x30, 0x4e, 0xc7, 0xcc, 0xb0, 0x54, 0x8a, 0xbc, 0x34, 0x36, 0xd9, 0x71, - 0x42, 0xb6, 0x77, 0x6f, 0x98, 0x61, 0x71, 0x7d, 0x43, 0x9e, 0xe2, 0xfb, 0xa0, 0x94, 0x50, 0xe9, - 0x04, 0xb4, 0x66, 0x19, 0x5c, 0x1e, 0x5d, 0xa1, 0xf0, 0x6e, 0x72, 0xaf, 0x06, 0x6f, 0x2c, 0xd6, - 0xfb, 0x81, 0xf0, 0x99, 0x5b, 0x03, 0xf9, 0x86, 0xf0, 0x89, 0x4d, 0x42, 0xfe, 0xf7, 0x87, 0xdd, - 0xd7, 0xec, 0xbc, 0xbd, 0xad, 0x8c, 0x7d, 0x98, 0xc0, 0xeb, 0xaf, 0xd0, 0x62, 0xed, 0xa3, 0xe5, - 0xda, 0x47, 0xbf, 0xd6, 0x3e, 0xfa, 0xb2, 0xf1, 0xbd, 0xe5, 0xc6, 0xf7, 0x56, 0x1b, 0xdf, 0xc3, - 0xcf, 0x72, 0x71, 0xb0, 0x4d, 0xff, 0xa1, 0xeb, 0x10, 0x57, 0x93, 0x31, 0x1a, 0x0d, 0xb2, 0x7d, - 0x8d, 0xfc, 0xef, 0x1d, 0x92, 0x55, 0xf1, 0x51, 0x5e, 0x1a, 0x50, 0x25, 0x2b, 0xa2, 0xfa, 0xab, - 0x36, 0xc9, 0xa0, 0x6c, 0x5d, 0xb5, 0xef, 0x47, 0xe1, 0x40, 0x42, 0xf9, 0x6e, 0x27, 0x57, 0x1b, - 0xd1, 0xd7, 0xbb, 0x48, 0x4d, 0x0c, 0xfa, 0xbe, 0xfb, 0xe1, 0xa4, 0x56, 0x7a, 0xf9, 0x3b, 0x00, - 0x00, 0xff, 0xff, 0x47, 0xf2, 0x5f, 0x42, 0xc8, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetricsServiceClient is the client API for MetricsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricsServiceClient interface { - Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) -} - -type metricsServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { - return &metricsServiceClient{cc} -} - -func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) { - out := new(ExportMetricsServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MetricsServiceServer is the server API for MetricsService service. -type MetricsServiceServer interface { - Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) -} - -// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetricsServiceServer struct { -} - -func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { - s.RegisterService(&_MetricsService_serviceDesc, srv) -} - -func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportMetricsServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricsServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetricsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService", - HandlerType: (*MetricsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _MetricsService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto", -} - -func (m *ExportMetricsServiceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportMetricsServiceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportMetricsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceMetrics) > 0 { - for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetricsService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExportMetricsServiceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportMetricsServiceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportMetricsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetricsService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ExportMetricsPartialSuccess) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportMetricsPartialSuccess) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ExportMetricsPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ErrorMessage) > 0 { - i -= len(m.ErrorMessage) - copy(dAtA[i:], m.ErrorMessage) - i = encodeVarintMetricsService(dAtA, i, uint64(len(m.ErrorMessage))) - i-- - dAtA[i] = 0x12 - } - if m.RejectedDataPoints != 0 { - i = encodeVarintMetricsService(dAtA, i, uint64(m.RejectedDataPoints)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintMetricsService(dAtA []byte, offset int, v uint64) int { - offset -= sovMetricsService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ExportMetricsServiceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceMetrics) > 0 { - for _, e := range m.ResourceMetrics { - l = e.Size() - n += 1 + l + sovMetricsService(uint64(l)) - } - } - return n -} - -func (m *ExportMetricsServiceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.PartialSuccess.Size() - n += 1 + l + sovMetricsService(uint64(l)) - return n -} - -func (m *ExportMetricsPartialSuccess) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RejectedDataPoints != 0 { - n += 1 + sovMetricsService(uint64(m.RejectedDataPoints)) - } - l = len(m.ErrorMessage) - if l > 0 { - n += 1 + l + sovMetricsService(uint64(l)) - } - return n -} - -func sovMetricsService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetricsService(x uint64) (n int) { - return sovMetricsService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ExportMetricsServiceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceMetrics = append(m.ResourceMetrics, &v1.ResourceMetrics{}) - if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetricsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportMetricsServiceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportMetricsServiceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportMetricsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetricsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportMetricsPartialSuccess) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportMetricsPartialSuccess: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportMetricsPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType) - } - m.RejectedDataPoints = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RejectedDataPoints |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetricsService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthMetricsService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorMessage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetricsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetricsService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetricsService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetricsService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetricsService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMetricsService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetricsService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetricsService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go deleted file mode 100644 index 80eae38f558..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go +++ /dev/null @@ -1,897 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/profiles/v1development/profiles_service.proto - -package v1development - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - - v1development "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
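Note: the encodeVarint*/sov*/soz* helpers removed in these generated files all implement the same protobuf base-128 varint scheme; a self-contained sketch of that encoding (function names are mine, not from the generated code):

    package example

    import "math/bits"

    // varintSize mirrors sovLogsService/sovMetricsService: one byte per 7
    // significant bits; x|1 makes the zero value still occupy one byte.
    // sozLogsService additionally ZigZag-encodes signed values first:
    // varintSize(uint64(x<<1) ^ uint64(int64(x)>>63)).
    func varintSize(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

    // putVarint mirrors encodeVarint*: low 7 bits per byte, continuation bit
    // set on every byte except the last. (The generated code writes the same
    // bytes back-to-front into a pre-sized buffer.)
    func putVarint(dst []byte, v uint64) int {
        n := 0
        for v >= 1<<7 {
            dst[n] = byte(v&0x7f | 0x80)
            v >>= 7
            n++
        }
        dst[n] = byte(v)
        return n + 1
    }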
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportProfilesServiceRequest struct { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - ResourceProfiles []*v1development.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` - // The reference table containing all data shared by profiles across the message being sent. - Dictionary v1development.ProfilesDictionary `protobuf:"bytes,2,opt,name=dictionary,proto3" json:"dictionary"` -} - -func (m *ExportProfilesServiceRequest) Reset() { *m = ExportProfilesServiceRequest{} } -func (m *ExportProfilesServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportProfilesServiceRequest) ProtoMessage() {} -func (*ExportProfilesServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ad3943ce836e7720, []int{0} -} -func (m *ExportProfilesServiceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportProfilesServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportProfilesServiceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportProfilesServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportProfilesServiceRequest.Merge(m, src) -} -func (m *ExportProfilesServiceRequest) XXX_Size() int { - return m.Size() -} -func (m *ExportProfilesServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportProfilesServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportProfilesServiceRequest proto.InternalMessageInfo - -func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1development.ResourceProfiles { - if m != nil { - return m.ResourceProfiles - } - return nil -} - -func (m *ExportProfilesServiceRequest) GetDictionary() v1development.ProfilesDictionary { - if m != nil { - return m.Dictionary - } - return v1development.ProfilesDictionary{} -} - -type ExportProfilesServiceResponse struct { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. 
- PartialSuccess ExportProfilesPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"` -} - -func (m *ExportProfilesServiceResponse) Reset() { *m = ExportProfilesServiceResponse{} } -func (m *ExportProfilesServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportProfilesServiceResponse) ProtoMessage() {} -func (*ExportProfilesServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ad3943ce836e7720, []int{1} -} -func (m *ExportProfilesServiceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportProfilesServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportProfilesServiceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportProfilesServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportProfilesServiceResponse.Merge(m, src) -} -func (m *ExportProfilesServiceResponse) XXX_Size() int { - return m.Size() -} -func (m *ExportProfilesServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportProfilesServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportProfilesServiceResponse proto.InternalMessageInfo - -func (m *ExportProfilesServiceResponse) GetPartialSuccess() ExportProfilesPartialSuccess { - if m != nil { - return m.PartialSuccess - } - return ExportProfilesPartialSuccess{} -} - -type ExportProfilesPartialSuccess struct { - // The number of rejected profiles. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - RejectedProfiles int64 `protobuf:"varint,1,opt,name=rejected_profiles,json=rejectedProfiles,proto3" json:"rejected_profiles,omitempty"` - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. 
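(Illustrative aside, not part of the patch.) The comments above fully specify the partial-success contract: a non-zero rejected_profiles means data was dropped, while rejected_profiles == 0 with a non-empty error_message is only a warning on a fully accepted request. A sketch of how a caller of this generated client API might act on that, assuming fmt and log are imported; the helper name handlePartialSuccess is hypothetical:

// handlePartialSuccess inspects an Export response per the contract above.
func handlePartialSuccess(resp *ExportProfilesServiceResponse) error {
	ps := resp.GetPartialSuccess()
	if ps.GetRejectedProfiles() > 0 {
		// Partial acceptance: the server dropped some profiles.
		return fmt.Errorf("server rejected %d profiles: %s",
			ps.GetRejectedProfiles(), ps.GetErrorMessage())
	}
	if msg := ps.GetErrorMessage(); msg != "" {
		// Fully accepted, but the server sent a warning/suggestion.
		log.Printf("profiles export warning: %s", msg)
	}
	return nil
}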
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (m *ExportProfilesPartialSuccess) Reset() { *m = ExportProfilesPartialSuccess{} } -func (m *ExportProfilesPartialSuccess) String() string { return proto.CompactTextString(m) } -func (*ExportProfilesPartialSuccess) ProtoMessage() {} -func (*ExportProfilesPartialSuccess) Descriptor() ([]byte, []int) { - return fileDescriptor_ad3943ce836e7720, []int{2} -} -func (m *ExportProfilesPartialSuccess) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportProfilesPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportProfilesPartialSuccess.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportProfilesPartialSuccess) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportProfilesPartialSuccess.Merge(m, src) -} -func (m *ExportProfilesPartialSuccess) XXX_Size() int { - return m.Size() -} -func (m *ExportProfilesPartialSuccess) XXX_DiscardUnknown() { - xxx_messageInfo_ExportProfilesPartialSuccess.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportProfilesPartialSuccess proto.InternalMessageInfo - -func (m *ExportProfilesPartialSuccess) GetRejectedProfiles() int64 { - if m != nil { - return m.RejectedProfiles - } - return 0 -} - -func (m *ExportProfilesPartialSuccess) GetErrorMessage() string { - if m != nil { - return m.ErrorMessage - } - return "" -} - -func init() { - proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceRequest") - proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceResponse") - proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesPartialSuccess") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", fileDescriptor_ad3943ce836e7720) -} - -var fileDescriptor_ad3943ce836e7720 = []byte{ - // 467 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x8a, 0xd3, 0x40, - 0x18, 0xc7, 0x33, 0xbb, 0xb2, 0xe0, 0xac, 0xba, 0x1a, 0xf6, 0xb0, 0x14, 0x8d, 0x4b, 0xbc, 0x14, - 0x84, 0x09, 0x5b, 0x17, 0x44, 0x10, 0x0f, 0x75, 0x3d, 0x89, 0x18, 0x52, 0xf1, 0xa0, 0x87, 0x10, - 0x27, 0x9f, 0x61, 0x24, 0x9d, 0x19, 0x67, 0xa6, 0xc5, 0x1e, 0x7d, 0x03, 0xdf, 0xc1, 0x9b, 0x57, - 0x1f, 0xc2, 0x1e, 0x7b, 0xf4, 0x24, 0xd2, 0x3e, 0x80, 0x4f, 0x20, 0x48, 0x32, 0x4d, 0x6c, 0x42, - 0xa5, 0x58, 0x7a, 0xcb, 0x7c, 0xc3, 0xff, 0xf7, 0xff, 0x7f, 0xdf, 0x17, 0x06, 0x3f, 0x15, 0x12, - 0xb8, 0x81, 0x1c, 0x86, 0x60, 0xd4, 0x24, 0x90, 0x4a, 0x18, 0x11, 0x50, 0x91, 0xe7, 0x40, 0x8d, - 0x50, 0xc5, 0xf9, 0x2d, 0xcb, 0x41, 0x07, 0xe3, 0xb3, 0x14, 0xc6, 0x90, 0x0b, 0x39, 0x04, 0x6e, - 0xea, 0x72, 0xac, 0x41, 0x8d, 0x19, 0x05, 0x52, 0xea, 0xdc, 0xf3, 0x06, 0xcc, 0x16, 0x49, 0x0d, - 0x23, 0x95, 0x8a, 0x34, 0x60, 0x9d, 0xe3, 0x4c, 0x64, 0xc2, 0x1a, 0x17, 0x5f, 0x56, 0xd6, 0x79, - 0xb0, 0x2e, 0xd8, 0x86, 0x38, 0x56, 0xea, 0xff, 0x42, 0xf8, 0xe6, 0x93, 0x0f, 0x52, 0x28, 0x13, - 0x2e, 0x2f, 0x06, 0x36, 0x66, 0x04, 0xef, 0x47, 0xa0, 0x8d, 0xcb, 0xf0, 0x0d, 0x05, 0x5a, 0x8c, - 0x14, 0x85, 0xb8, 0xd2, 0x9e, 0xa0, 0xd3, 0xfd, 0xee, 0x61, 
0xef, 0x21, 0x59, 0xd7, 0xc3, 0xfa, - 0xe4, 0x24, 0x5a, 0x42, 0x2a, 0x9b, 0xe8, 0xba, 0x6a, 0x55, 0xdc, 0x14, 0xe3, 0x94, 0x51, 0xc3, - 0x04, 0x4f, 0xd4, 0xe4, 0x64, 0xef, 0x14, 0x75, 0x0f, 0x7b, 0x8f, 0xfe, 0xc7, 0xa3, 0x22, 0x5d, - 0xd4, 0x94, 0xfe, 0xa5, 0xe9, 0x8f, 0xdb, 0x4e, 0xb4, 0xc2, 0xf5, 0x3f, 0x23, 0x7c, 0xeb, 0x1f, - 0x1d, 0x6b, 0x29, 0xb8, 0x06, 0xf7, 0x23, 0xc2, 0x47, 0x32, 0x51, 0x86, 0x25, 0x79, 0xac, 0x47, - 0x94, 0x82, 0x2e, 0x3a, 0x2e, 0xd2, 0x44, 0x64, 0x9b, 0xad, 0x91, 0xa6, 0x5d, 0x68, 0xd1, 0x03, - 0x4b, 0x5e, 0x26, 0xbc, 0x26, 0x1b, 0x55, 0x5f, 0xb6, 0xd7, 0xd2, 0x54, 0xb9, 0x77, 0x8b, 0xb5, - 0xbc, 0x03, 0x6a, 0x20, 0x5d, 0x5d, 0x0b, 0xea, 0xee, 0x17, 0x83, 0xb5, 0x17, 0xf5, 0x60, 0xef, - 0xe0, 0xab, 0xa0, 0x94, 0x50, 0xf1, 0x10, 0xb4, 0x4e, 0x32, 0x28, 0x67, 0x7b, 0x39, 0xba, 0x52, - 0x16, 0x9f, 0xd9, 0x5a, 0xef, 0x1b, 0xc2, 0x47, 0xad, 0x89, 0xb8, 0x5f, 0x11, 0x3e, 0xb0, 0x31, - 0xdc, 0x9d, 0xb4, 0xde, 0xfc, 0xb7, 0x3a, 0x83, 0x9d, 0x32, 0xed, 0xf6, 0x7c, 0xa7, 0xff, 0x1b, - 0x4d, 0xe7, 0x1e, 0x9a, 0xcd, 0x3d, 0xf4, 0x73, 0xee, 0xa1, 0x4f, 0x0b, 0xcf, 0x99, 0x2d, 0x3c, - 0xe7, 0xfb, 0xc2, 0x73, 0xf0, 0x7d, 0x26, 0xb6, 0xf2, 0xec, 0x1f, 0xb7, 0xec, 0xc2, 0x42, 0x16, - 0xa2, 0x57, 0xaf, 0xb3, 0x36, 0x90, 0x35, 0xde, 0x84, 0x34, 0x31, 0x49, 0xc0, 0xb8, 0x01, 0xc5, - 0x93, 0x3c, 0x28, 0x4f, 0xa5, 0x63, 0x06, 0x7c, 0xe3, 0xd3, 0xf1, 0x65, 0xef, 0xfc, 0xb9, 0x04, - 0xfe, 0xa2, 0x46, 0x97, 0xa6, 0xe4, 0x71, 0x9d, 0xb5, 0xca, 0x44, 0x5e, 0x9e, 0x5d, 0xfc, 0x95, - 0xbd, 0x39, 0x28, 0x1d, 0xee, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0x40, 0xb9, 0xb5, 0x6e, 0xb0, - 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ProfilesServiceClient is the client API for ProfilesService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ProfilesServiceClient interface { - Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) -} - -type profilesServiceClient struct { - cc *grpc.ClientConn -} - -func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient { - return &profilesServiceClient{cc} -} - -func (c *profilesServiceClient) Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) { - out := new(ExportProfilesServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ProfilesServiceServer is the server API for ProfilesService service. -type ProfilesServiceServer interface { - Export(context.Context, *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error) -} - -// UnimplementedProfilesServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedProfilesServiceServer struct { -} - -func (*UnimplementedProfilesServiceServer) Export(ctx context.Context, req *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterProfilesServiceServer(s *grpc.Server, srv ProfilesServiceServer) { - s.RegisterService(&_ProfilesService_serviceDesc, srv) -} - -func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportProfilesServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProfilesServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProfilesServiceServer).Export(ctx, req.(*ExportProfilesServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ProfilesService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService", - HandlerType: (*ProfilesServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _ProfilesService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", -} - -func (m *ExportProfilesServiceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportProfilesServiceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportProfilesServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Dictionary.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfilesService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.ResourceProfiles) > 0 { - for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfilesService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExportProfilesServiceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportProfilesServiceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportProfilesServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfilesService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ExportProfilesPartialSuccess) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportProfilesPartialSuccess) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportProfilesPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ErrorMessage) > 0 { - i -= len(m.ErrorMessage) - copy(dAtA[i:], m.ErrorMessage) - i = encodeVarintProfilesService(dAtA, i, uint64(len(m.ErrorMessage))) - i-- - dAtA[i] = 0x12 - } - if m.RejectedProfiles != 0 { - i = encodeVarintProfilesService(dAtA, i, uint64(m.RejectedProfiles)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintProfilesService(dAtA []byte, offset int, v uint64) int { - offset -= sovProfilesService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ExportProfilesServiceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceProfiles) > 0 { - for _, e := range m.ResourceProfiles { - l = e.Size() - n += 1 + l + sovProfilesService(uint64(l)) - } - } - l = m.Dictionary.Size() - n += 1 + l + sovProfilesService(uint64(l)) - return n -} - -func (m *ExportProfilesServiceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.PartialSuccess.Size() - n += 1 + l + sovProfilesService(uint64(l)) - return n -} - -func (m *ExportProfilesPartialSuccess) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RejectedProfiles != 0 { - n += 1 + sovProfilesService(uint64(m.RejectedProfiles)) - } - l = len(m.ErrorMessage) - if l > 0 { - n += 1 + l + sovProfilesService(uint64(l)) - } - return n -} - -func sovProfilesService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProfilesService(x uint64) (n int) { - return sovProfilesService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ExportProfilesServiceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfilesService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportProfilesServiceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportProfilesServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfilesService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfilesService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfilesService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceProfiles = append(m.ResourceProfiles, &v1development.ResourceProfiles{}) - if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfilesService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfilesService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfilesService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Dictionary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfilesService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfilesService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportProfilesServiceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfilesService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportProfilesServiceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportProfilesServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfilesService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfilesService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfilesService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfilesService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfilesService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportProfilesPartialSuccess) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfilesService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportProfilesPartialSuccess: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
ExportProfilesPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType) - } - m.RejectedProfiles = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfilesService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RejectedProfiles |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfilesService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfilesService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfilesService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorMessage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfilesService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfilesService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProfilesService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfilesService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfilesService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfilesService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProfilesService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProfilesService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProfilesService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProfilesService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProfilesService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProfilesService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go deleted file mode 100644 index 
5b547b8a7a9..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go +++ /dev/null @@ -1,839 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/trace/v1/trace_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportTraceServiceRequest struct { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` -} - -func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } -func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceRequest) ProtoMessage() {} -func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192a962890318cf4, []int{0} -} -func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) -} -func (m *ExportTraceServiceRequest) XXX_Size() int { - return m.Size() -} -func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo - -func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans { - if m != nil { - return m.ResourceSpans - } - return nil -} - -type ExportTraceServiceResponse struct { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. 
- // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - PartialSuccess ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"` -} - -func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } -func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceResponse) ProtoMessage() {} -func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_192a962890318cf4, []int{1} -} -func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) -} -func (m *ExportTraceServiceResponse) XXX_Size() int { - return m.Size() -} -func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo - -func (m *ExportTraceServiceResponse) GetPartialSuccess() ExportTracePartialSuccess { - if m != nil { - return m.PartialSuccess - } - return ExportTracePartialSuccess{} -} - -type ExportTracePartialSuccess struct { - // The number of rejected spans. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"` - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. 
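(Illustrative aside, not part of the patch.) On the server side the same contract is produced rather than consumed. A sketch of a handler that embeds UnimplementedTraceServiceServer (defined further down in this file) for forward compatibility and fills in PartialSuccess only when spans were actually rejected; the validation is elided and the error text is hypothetical:

// tracesServer embeds the Unimplemented type so methods added to the
// service later return codes.Unimplemented instead of breaking the build.
type tracesServer struct {
	UnimplementedTraceServiceServer
}

func (s *tracesServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
	var rejected int64 // a real implementation would count spans dropped during validation
	resp := &ExportTraceServiceResponse{}
	if rejected > 0 {
		// Leave PartialSuccess zero-valued on full acceptance; senders treat
		// an empty message as equivalent to it not being set.
		resp.PartialSuccess = ExportTracePartialSuccess{
			RejectedSpans: rejected,
			ErrorMessage:  "spans failed validation",
		}
	}
	return resp, nil
}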
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (m *ExportTracePartialSuccess) Reset() { *m = ExportTracePartialSuccess{} } -func (m *ExportTracePartialSuccess) String() string { return proto.CompactTextString(m) } -func (*ExportTracePartialSuccess) ProtoMessage() {} -func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) { - return fileDescriptor_192a962890318cf4, []int{2} -} -func (m *ExportTracePartialSuccess) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportTracePartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportTracePartialSuccess.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportTracePartialSuccess) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTracePartialSuccess.Merge(m, src) -} -func (m *ExportTracePartialSuccess) XXX_Size() int { - return m.Size() -} -func (m *ExportTracePartialSuccess) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTracePartialSuccess.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTracePartialSuccess proto.InternalMessageInfo - -func (m *ExportTracePartialSuccess) GetRejectedSpans() int64 { - if m != nil { - return m.RejectedSpans - } - return 0 -} - -func (m *ExportTracePartialSuccess) GetErrorMessage() string { - if m != nil { - return m.ErrorMessage - } - return "" -} - -func init() { - proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest") - proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse") - proto.RegisterType((*ExportTracePartialSuccess)(nil), "opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4) -} - -var fileDescriptor_192a962890318cf4 = []byte{ - // 413 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4f, 0xeb, 0xd3, 0x30, - 0x18, 0x6e, 0x36, 0x19, 0x98, 0xfd, 0x11, 0x8b, 0x87, 0xd9, 0x43, 0x1d, 0x15, 0x47, 0x45, 0x48, - 0xd9, 0xbc, 0x79, 0xb3, 0xe2, 0x71, 0x38, 0xba, 0xe1, 0xc1, 0xcb, 0x88, 0xdd, 0x4b, 0xa9, 0x74, - 0x4d, 0x4c, 0xb2, 0xa1, 0x5f, 0x42, 0xf4, 0x2b, 0x78, 0xf4, 0x93, 0xec, 0xb8, 0xa3, 0x27, 0x91, - 0xed, 0x8b, 0x48, 0x12, 0x2d, 0xad, 0xf4, 0x30, 0x7e, 0xbf, 0x5b, 0xf2, 0xf0, 0x3e, 0x7f, 0xde, - 0x27, 0x04, 0xbf, 0x60, 0x1c, 0x4a, 0x05, 0x05, 0xec, 0x40, 0x89, 0xcf, 0x11, 0x17, 0x4c, 0xb1, - 0x28, 0x65, 0x45, 0x01, 0xa9, 0x62, 0x22, 0x52, 0x82, 0xa6, 0x10, 0x1d, 0x66, 0xf6, 0xb0, 0x91, - 0x20, 0x0e, 0x79, 0x0a, 0xc4, 0x8c, 0xb9, 0xd3, 0x06, 0xd7, 0x82, 0xa4, 0xe2, 0x12, 0x43, 0x21, - 0x87, 0x99, 0xf7, 0x20, 0x63, 0x19, 0xb3, 0xca, 0xfa, 0x64, 0x07, 0xbd, 0xb0, 0xcd, 0xb9, 0xe9, - 0x67, 0x27, 0x03, 0x86, 0x1f, 0xbe, 0xfe, 0xc4, 0x99, 0x50, 0x6b, 0x0d, 0xae, 0x6c, 0x86, 0x04, - 0x3e, 0xee, 0x41, 0x2a, 0x37, 0xc1, 0x23, 0x01, 0x92, 0xed, 0x85, 0x8e, 0xc7, 0x69, 0x29, 0xc7, - 0x68, 0xd2, 0x0d, 0xfb, 0xf3, 0x67, 0xa4, 0x2d, 0xdd, 0xbf, 0x4c, 0x24, 0xf9, 0xcb, 0x59, 0x69, - 0x4a, 0x32, 0x14, 0xf5, 0x6b, 0xf0, 0x05, 0x61, 0xaf, 0xcd, 0x51, 0x72, 0x56, 0x4a, 0x70, 0x39, - 0xbe, 0xc7, 0xa9, 0x50, 0x39, 0x2d, 0x36, 0x72, 0x9f, 0xa6, 0x20, 0xb5, 0x27, 0x0a, 0xfb, 0xf3, - 
0x97, 0xe4, 0xba, 0x46, 0x48, 0x4d, 0x7c, 0x69, 0x95, 0x56, 0x56, 0x28, 0xbe, 0x73, 0xfc, 0xf5, - 0xc8, 0x49, 0x46, 0xbc, 0x81, 0x06, 0x59, 0xa3, 0x81, 0x26, 0xc5, 0x7d, 0xa2, 0x1b, 0xf8, 0x00, - 0xa9, 0x82, 0x6d, 0xd5, 0x00, 0x0a, 0xbb, 0x7a, 0x29, 0x8b, 0x9a, 0xa5, 0xdc, 0xc7, 0x78, 0x08, - 0x42, 0x30, 0xb1, 0xd9, 0x81, 0x94, 0x34, 0x83, 0x71, 0x67, 0x82, 0xc2, 0xbb, 0xc9, 0xc0, 0x80, - 0x0b, 0x8b, 0xcd, 0xbf, 0x23, 0x3c, 0xa8, 0xef, 0xec, 0x7e, 0x43, 0xb8, 0x67, 0xad, 0xdd, 0x9b, - 0x6c, 0xd7, 0x7c, 0x2c, 0x2f, 0xbe, 0x8d, 0x84, 0x6d, 0x3f, 0x70, 0xe2, 0x13, 0x3a, 0x9e, 0x7d, - 0x74, 0x3a, 0xfb, 0xe8, 0xf7, 0xd9, 0x47, 0x5f, 0x2f, 0xbe, 0x73, 0xba, 0xf8, 0xce, 0xcf, 0x8b, - 0xef, 0xe0, 0xa7, 0x39, 0xbb, 0xd2, 0x22, 0xbe, 0x5f, 0x57, 0x5f, 0xea, 0xa9, 0x25, 0x7a, 0xb7, - 0xc8, 0xfe, 0xe7, 0xe7, 0xf5, 0xef, 0xc0, 0xb7, 0x54, 0xd1, 0x28, 0x2f, 0x15, 0x88, 0x92, 0x16, - 0x91, 0xb9, 0x19, 0x83, 0x0c, 0xca, 0x96, 0x5f, 0xf3, 0xa3, 0x33, 0x7d, 0xc3, 0xa1, 0x5c, 0x57, - 0x62, 0xc6, 0x86, 0xbc, 0xaa, 0xc2, 0x98, 0x08, 0xe4, 0xed, 0xec, 0x7d, 0xcf, 0xa8, 0x3c, 0xff, - 0x13, 0x00, 0x00, 0xff, 0xff, 0x82, 0xce, 0x78, 0xc7, 0x8f, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TraceServiceClient is the client API for TraceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TraceServiceClient interface { - Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) -} - -type traceServiceClient struct { - cc *grpc.ClientConn -} - -func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { - return &traceServiceClient{cc} -} - -func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { - out := new(ExportTraceServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// TraceServiceServer is the server API for TraceService service. -type TraceServiceServer interface { - Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) -} - -// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedTraceServiceServer struct { -} - -func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) -} - -func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportTraceServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TraceServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _TraceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", - HandlerType: (*TraceServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _TraceService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", -} - -func (m *ExportTraceServiceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportTraceServiceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportTraceServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceSpans) > 0 { - for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTraceService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExportTraceServiceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportTraceServiceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportTraceServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTraceService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ExportTracePartialSuccess) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportTracePartialSuccess) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportTracePartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := 
len(dAtA) - _ = i - var l int - _ = l - if len(m.ErrorMessage) > 0 { - i -= len(m.ErrorMessage) - copy(dAtA[i:], m.ErrorMessage) - i = encodeVarintTraceService(dAtA, i, uint64(len(m.ErrorMessage))) - i-- - dAtA[i] = 0x12 - } - if m.RejectedSpans != 0 { - i = encodeVarintTraceService(dAtA, i, uint64(m.RejectedSpans)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintTraceService(dAtA []byte, offset int, v uint64) int { - offset -= sovTraceService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ExportTraceServiceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceSpans) > 0 { - for _, e := range m.ResourceSpans { - l = e.Size() - n += 1 + l + sovTraceService(uint64(l)) - } - } - return n -} - -func (m *ExportTraceServiceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.PartialSuccess.Size() - n += 1 + l + sovTraceService(uint64(l)) - return n -} - -func (m *ExportTracePartialSuccess) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RejectedSpans != 0 { - n += 1 + sovTraceService(uint64(m.RejectedSpans)) - } - l = len(m.ErrorMessage) - if l > 0 { - n += 1 + l + sovTraceService(uint64(l)) - } - return n -} - -func sovTraceService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTraceService(x uint64) (n int) { - return sovTraceService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ExportTraceServiceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportTraceServiceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportTraceServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTraceService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTraceService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceSpans = append(m.ResourceSpans, &v1.ResourceSpans{}) - if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTraceService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportTraceServiceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := 
iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportTraceServiceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportTraceServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTraceService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTraceService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTraceService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportTracePartialSuccess) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportTracePartialSuccess: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportTracePartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType) - } - m.RejectedSpans = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RejectedSpans |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTraceService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTraceService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorMessage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTraceService(dAtA[iNdEx:]) - 
if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTraceService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTraceService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTraceService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTraceService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTraceService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTraceService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTraceService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go deleted file mode 100644 index 179aa9d5d95..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go +++ /dev/null @@ -1,2080 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/common/v1/common.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// AnyValue is used to represent any type of attribute value. AnyValue may contain a -// primitive value such as a string or integer or it may contain an arbitrary nested -// object containing arrays, key-value lists and primitives. -type AnyValue struct { - // The value is one of the listed fields. It is valid for all values to be unspecified - // in which case this AnyValue is considered to be "empty". 
- // - // Types that are valid to be assigned to Value: - // *AnyValue_StringValue - // *AnyValue_BoolValue - // *AnyValue_IntValue - // *AnyValue_DoubleValue - // *AnyValue_ArrayValue - // *AnyValue_KvlistValue - // *AnyValue_BytesValue - Value isAnyValue_Value `protobuf_oneof:"value"` -} - -func (m *AnyValue) Reset() { *m = AnyValue{} } -func (m *AnyValue) String() string { return proto.CompactTextString(m) } -func (*AnyValue) ProtoMessage() {} -func (*AnyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{0} -} -func (m *AnyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AnyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_AnyValue.Merge(m, src) -} -func (m *AnyValue) XXX_Size() int { - return m.Size() -} -func (m *AnyValue) XXX_DiscardUnknown() { - xxx_messageInfo_AnyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_AnyValue proto.InternalMessageInfo - -type isAnyValue_Value interface { - isAnyValue_Value() - MarshalTo([]byte) (int, error) - Size() int -} - -type AnyValue_StringValue struct { - StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` -} -type AnyValue_BoolValue struct { - BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` -} -type AnyValue_IntValue struct { - IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` -} -type AnyValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` -} -type AnyValue_ArrayValue struct { - ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"` -} -type AnyValue_KvlistValue struct { - KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"` -} -type AnyValue_BytesValue struct { - BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof" json:"bytes_value,omitempty"` -} - -func (*AnyValue_StringValue) isAnyValue_Value() {} -func (*AnyValue_BoolValue) isAnyValue_Value() {} -func (*AnyValue_IntValue) isAnyValue_Value() {} -func (*AnyValue_DoubleValue) isAnyValue_Value() {} -func (*AnyValue_ArrayValue) isAnyValue_Value() {} -func (*AnyValue_KvlistValue) isAnyValue_Value() {} -func (*AnyValue_BytesValue) isAnyValue_Value() {} - -func (m *AnyValue) GetValue() isAnyValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *AnyValue) GetStringValue() string { - if x, ok := m.GetValue().(*AnyValue_StringValue); ok { - return x.StringValue - } - return "" -} - -func (m *AnyValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*AnyValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *AnyValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*AnyValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *AnyValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *AnyValue) GetArrayValue() *ArrayValue { - if 
x, ok := m.GetValue().(*AnyValue_ArrayValue); ok { - return x.ArrayValue - } - return nil -} - -func (m *AnyValue) GetKvlistValue() *KeyValueList { - if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok { - return x.KvlistValue - } - return nil -} - -func (m *AnyValue) GetBytesValue() []byte { - if x, ok := m.GetValue().(*AnyValue_BytesValue); ok { - return x.BytesValue - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AnyValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AnyValue_StringValue)(nil), - (*AnyValue_BoolValue)(nil), - (*AnyValue_IntValue)(nil), - (*AnyValue_DoubleValue)(nil), - (*AnyValue_ArrayValue)(nil), - (*AnyValue_KvlistValue)(nil), - (*AnyValue_BytesValue)(nil), - } -} - -// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message -// since oneof in AnyValue does not allow repeated fields. -type ArrayValue struct { - // Array of values. The array may be empty (contain 0 elements). - Values []AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"` -} - -func (m *ArrayValue) Reset() { *m = ArrayValue{} } -func (m *ArrayValue) String() string { return proto.CompactTextString(m) } -func (*ArrayValue) ProtoMessage() {} -func (*ArrayValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{1} -} -func (m *ArrayValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ArrayValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArrayValue.Merge(m, src) -} -func (m *ArrayValue) XXX_Size() int { - return m.Size() -} -func (m *ArrayValue) XXX_DiscardUnknown() { - xxx_messageInfo_ArrayValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ArrayValue proto.InternalMessageInfo - -func (m *ArrayValue) GetValues() []AnyValue { - if m != nil { - return m.Values - } - return nil -} - -// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message -// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need -// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to -// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches -// are semantically equivalent. -type KeyValueList struct { - // A collection of key/value pairs of key-value pairs. The list may be empty (may - // contain 0 elements). - // The keys MUST be unique (it is not allowed to have more than one - // value with the same key). 
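(Illustrative aside, not part of the patch.) The oneof wrappers and typed getters above are the entire access surface for AnyValue: setting a value means wrapping a concrete *AnyValue_XxxValue in the Value field, and a getter for a non-matching variant simply returns its zero value. A short sketch, with fmt assumed imported and the function name hypothetical:

func anyValueDemo() {
	str := &AnyValue{Value: &AnyValue_StringValue{StringValue: "checkout"}}
	arr := &AnyValue{Value: &AnyValue_ArrayValue{ArrayValue: &ArrayValue{
		Values: []AnyValue{*str}, // ArrayValue holds AnyValue values, not pointers
	}}}
	fmt.Println(str.GetStringValue())            // "checkout"
	fmt.Println(arr.GetIntValue())               // 0: wrong-variant getter yields the zero value
	fmt.Println((&AnyValue{}).GetValue() == nil) // true: an unset oneof is an "empty" AnyValue
}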
- Values []KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"` -} - -func (m *KeyValueList) Reset() { *m = KeyValueList{} } -func (m *KeyValueList) String() string { return proto.CompactTextString(m) } -func (*KeyValueList) ProtoMessage() {} -func (*KeyValueList) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{2} -} -func (m *KeyValueList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KeyValueList) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValueList.Merge(m, src) -} -func (m *KeyValueList) XXX_Size() int { - return m.Size() -} -func (m *KeyValueList) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValueList.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValueList proto.InternalMessageInfo - -func (m *KeyValueList) GetValues() []KeyValue { - if m != nil { - return m.Values - } - return nil -} - -// KeyValue is a key-value pair that is used to store Span attributes, Link -// attributes, etc. -type KeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{3} -} -func (m *KeyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValue.Merge(m, src) -} -func (m *KeyValue) XXX_Size() int { - return m.Size() -} -func (m *KeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValue proto.InternalMessageInfo - -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetValue() AnyValue { - if m != nil { - return m.Value - } - return AnyValue{} -} - -// InstrumentationScope is a message representing the instrumentation scope information -// such as the fully qualified name and version. -type InstrumentationScope struct { - // An empty instrumentation scope name means the name is unknown. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // Additional attributes that describe the scope. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). 
- Attributes []KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"` - DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` -} - -func (m *InstrumentationScope) Reset() { *m = InstrumentationScope{} } -func (m *InstrumentationScope) String() string { return proto.CompactTextString(m) } -func (*InstrumentationScope) ProtoMessage() {} -func (*InstrumentationScope) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{4} -} -func (m *InstrumentationScope) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InstrumentationScope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InstrumentationScope.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InstrumentationScope) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationScope.Merge(m, src) -} -func (m *InstrumentationScope) XXX_Size() int { - return m.Size() -} -func (m *InstrumentationScope) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationScope.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationScope proto.InternalMessageInfo - -func (m *InstrumentationScope) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *InstrumentationScope) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *InstrumentationScope) GetAttributes() []KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *InstrumentationScope) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// A reference to an Entity. -// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs. -// -// Status: [Development] -type EntityRef struct { - // The Schema URL, if known. This is the identifier of the Schema that the entity data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // - // This schema_url applies to the data in this message and to the Resource attributes - // referenced by id_keys and description_keys. - // TODO: discuss if we are happy with this somewhat complicated definition of what - // the schema_url applies to. - // - // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. - SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` - // Defines the type of the entity. MUST not change during the lifetime of the entity. - // For example: "service" or "host". This field is required and MUST not be empty - // for valid entities. - Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` - // Attribute Keys that identify the entity. - // MUST not change during the lifetime of the entity. The Id must contain at least one attribute. - // These keys MUST exist in the containing {message}.attributes. - IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"` - // Descriptive (non-identifying) attribute keys of the entity. - // MAY change over the lifetime of the entity. MAY be empty. - // These attribute keys are not part of entity's identity. 
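// A minimal sketch, not part of the generated file, of the id_keys contract
// described above: every id key must resolve to a key in the containing
// message's attributes, and at least one is required. entityRefResolvable
// is a hypothetical helper, not an API of this package.
func entityRefResolvable(ref *EntityRef, attrs []KeyValue) bool {
	if len(ref.GetIdKeys()) == 0 {
		return false // an entity id needs at least one attribute key
	}
	present := make(map[string]bool, len(attrs))
	for i := range attrs {
		present[attrs[i].GetKey()] = true
	}
	for _, k := range ref.GetIdKeys() {
		if !present[k] {
			return false
		}
	}
	return true
}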
- // These keys MUST exist in the containing {message}.attributes. - DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"` -} - -func (m *EntityRef) Reset() { *m = EntityRef{} } -func (m *EntityRef) String() string { return proto.CompactTextString(m) } -func (*EntityRef) ProtoMessage() {} -func (*EntityRef) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{5} -} -func (m *EntityRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EntityRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EntityRef.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EntityRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_EntityRef.Merge(m, src) -} -func (m *EntityRef) XXX_Size() int { - return m.Size() -} -func (m *EntityRef) XXX_DiscardUnknown() { - xxx_messageInfo_EntityRef.DiscardUnknown(m) -} - -var xxx_messageInfo_EntityRef proto.InternalMessageInfo - -func (m *EntityRef) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -func (m *EntityRef) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *EntityRef) GetIdKeys() []string { - if m != nil { - return m.IdKeys - } - return nil -} - -func (m *EntityRef) GetDescriptionKeys() []string { - if m != nil { - return m.DescriptionKeys - } - return nil -} - -func init() { - proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue") - proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue") - proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList") - proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue") - proto.RegisterType((*InstrumentationScope)(nil), "opentelemetry.proto.common.v1.InstrumentationScope") - proto.RegisterType((*EntityRef)(nil), "opentelemetry.proto.common.v1.EntityRef") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817) -} - -var fileDescriptor_62ba46dcb97aa817 = []byte{ - // 608 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41, - 0x14, 0xdf, 0xa1, 0xa5, 0xed, 0xbe, 0xd6, 0x48, 0x26, 0x44, 0x1b, 0x93, 0x96, 0xb5, 0x1e, 0x5c, - 0x34, 0x69, 0x03, 0x5e, 0xbc, 0x52, 0x24, 0xa9, 0x01, 0x23, 0x59, 0x84, 0x83, 0x97, 0x66, 0xdb, - 0x7d, 0xd6, 0x09, 0xdb, 0x99, 0xcd, 0xec, 0xb4, 0xc9, 0x5e, 0xfd, 0x04, 0x7e, 0x0e, 0x2f, 0x7e, - 0x0d, 0x2e, 0x26, 0x1c, 0x3d, 0x19, 0x02, 0x5f, 0xc4, 0xcc, 0x9f, 0x16, 0xe4, 0x00, 0xc1, 0xdb, - 0x7b, 0xbf, 0xf7, 0x7b, 0xbf, 0xf7, 0x7e, 0x33, 0x93, 0x81, 0x57, 0x22, 0x43, 0xae, 0x30, 0xc5, - 0x29, 0x2a, 0x59, 0xf4, 0x32, 0x29, 0x94, 0xe8, 0x8d, 0xc5, 0x74, 0x2a, 0x78, 0x6f, 0xbe, 0xe5, - 0xa2, 0xae, 0x81, 0x69, 0xeb, 0x1f, 0xae, 0x05, 0xbb, 0x8e, 0x31, 0xdf, 0x7a, 0xb6, 0x3e, 0x11, - 0x13, 0x61, 0x05, 0x74, 0x64, 0xeb, 0x9d, 0x8b, 0x15, 0xa8, 0xed, 0xf0, 0xe2, 0x24, 0x4e, 0x67, - 0x48, 0x5f, 0x40, 0x23, 0x57, 0x92, 0xf1, 0xc9, 0x70, 0xae, 0xf3, 0x26, 0x09, 0x48, 0xe8, 0x0f, - 0xbc, 0xa8, 0x6e, 0x51, 0x4b, 0xda, 0x00, 0x18, 0x09, 0x91, 0x3a, 0xca, 0x4a, 0x40, 0xc2, 0xda, - 0xc0, 0x8b, 0x7c, 0x8d, 0x59, 0x42, 0x0b, 0x7c, 0xc6, 0x95, 0xab, 0x97, 0x02, 0x12, 0x96, 0x06, - 0x5e, 0x54, 0x63, 0x5c, 0x2d, 0x87, 
0x24, 0x62, 0x36, 0x4a, 0xd1, 0x31, 0xca, 0x01, 0x09, 0x89, - 0x1e, 0x62, 0x51, 0x4b, 0x3a, 0x80, 0x7a, 0x2c, 0x65, 0x5c, 0x38, 0xce, 0x6a, 0x40, 0xc2, 0xfa, - 0xf6, 0x66, 0xf7, 0x4e, 0x87, 0xdd, 0x1d, 0xdd, 0x61, 0xfa, 0x07, 0x5e, 0x04, 0xf1, 0x32, 0xa3, - 0x87, 0xd0, 0x38, 0x9d, 0xa7, 0x2c, 0x5f, 0x2c, 0x55, 0x31, 0x72, 0xaf, 0xef, 0x91, 0xdb, 0x47, - 0xdb, 0x7e, 0xc0, 0x72, 0xa5, 0xf7, 0xb3, 0x12, 0x56, 0xf1, 0x39, 0xd4, 0x47, 0x85, 0xc2, 0xdc, - 0x09, 0x56, 0x03, 0x12, 0x36, 0xf4, 0x50, 0x03, 0x1a, 0x4a, 0xbf, 0x0a, 0xab, 0xa6, 0xd8, 0x39, - 0x02, 0xb8, 0xde, 0x8c, 0xee, 0x41, 0xc5, 0xc0, 0x79, 0x93, 0x04, 0xa5, 0xb0, 0xbe, 0xfd, 0xf2, - 0x3e, 0x53, 0xee, 0x72, 0xfa, 0xe5, 0xb3, 0x3f, 0x1b, 0x5e, 0xe4, 0x9a, 0x3b, 0xc7, 0xd0, 0xb8, - 0xb9, 0xdf, 0x83, 0x65, 0x17, 0xcd, 0xb7, 0x64, 0x63, 0xa8, 0x2d, 0x2a, 0x74, 0x0d, 0x4a, 0xa7, - 0x58, 0xd8, 0x47, 0x10, 0xe9, 0x90, 0xee, 0x3a, 0x4b, 0xe6, 0xd6, 0x1f, 0xbc, 0xba, 0x3b, 0x8e, - 0x5f, 0x04, 0xd6, 0xdf, 0xf3, 0x5c, 0xc9, 0xd9, 0x14, 0xb9, 0x8a, 0x15, 0x13, 0xfc, 0x68, 0x2c, - 0x32, 0xa4, 0x14, 0xca, 0x3c, 0x9e, 0xba, 0x57, 0x17, 0x99, 0x98, 0x36, 0xa1, 0x3a, 0x47, 0x99, - 0x33, 0xc1, 0xcd, 0x4c, 0x3f, 0x5a, 0xa4, 0xf4, 0x03, 0x40, 0xac, 0x94, 0x64, 0xa3, 0x99, 0xc2, - 0xbc, 0x59, 0xfa, 0x1f, 0xd3, 0x37, 0x04, 0xe8, 0x5b, 0x68, 0x26, 0x52, 0x64, 0x19, 0x26, 0xc3, - 0x6b, 0x74, 0x38, 0x16, 0x33, 0xae, 0xcc, 0x0b, 0x7d, 0x14, 0x3d, 0x71, 0xf5, 0x9d, 0x65, 0x79, - 0x57, 0x57, 0x3b, 0xdf, 0x08, 0xf8, 0x7b, 0x5c, 0x31, 0x55, 0x44, 0xf8, 0x85, 0xb6, 0x00, 0xf2, - 0xf1, 0x57, 0x9c, 0xc6, 0xc3, 0x99, 0x4c, 0x9d, 0x15, 0xdf, 0x22, 0xc7, 0x32, 0xd5, 0x1e, 0x55, - 0x91, 0xa1, 0x33, 0x63, 0x62, 0xfa, 0x14, 0xaa, 0x2c, 0x19, 0x9e, 0x62, 0x61, 0x6d, 0xf8, 0x51, - 0x85, 0x25, 0xfb, 0x58, 0xe4, 0x74, 0x13, 0xd6, 0x12, 0xcc, 0xc7, 0x92, 0x65, 0xfa, 0x90, 0x2c, - 0xa3, 0x6c, 0x18, 0x8f, 0x6f, 0xe0, 0x9a, 0xda, 0xff, 0x49, 0xce, 0x2e, 0xdb, 0xe4, 0xfc, 0xb2, - 0x4d, 0x2e, 0x2e, 0xdb, 0xe4, 0xfb, 0x55, 0xdb, 0x3b, 0xbf, 0x6a, 0x7b, 0xbf, 0xaf, 0xda, 0x1e, - 0x04, 0x4c, 0xdc, 0x7d, 0x2c, 0xfd, 0xfa, 0xae, 0x09, 0x0f, 0x35, 0x7c, 0x48, 0x3e, 0xbf, 0x9b, - 0xdc, 0x6e, 0x60, 0xfa, 0xcf, 0x49, 0x53, 0x1c, 0x2b, 0x21, 0x7b, 0x59, 0x12, 0xab, 0xb8, 0xc7, - 0xb8, 0x42, 0xc9, 0xe3, 0xb4, 0x67, 0x32, 0xa3, 0x38, 0x41, 0x7e, 0xfd, 0x35, 0xfd, 0x58, 0x69, - 0x7d, 0xcc, 0x90, 0x7f, 0x5a, 0x6a, 0x18, 0xf5, 0xae, 0x9d, 0xd4, 0x3d, 0xd9, 0x1a, 0x55, 0x4c, - 0xcf, 0x9b, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x8b, 0xd4, 0x3b, 0xe2, 0x04, 0x00, 0x00, -} - -func (m *AnyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AnyValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *AnyValue_StringValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= len(m.StringValue) - copy(dAtA[i:], m.StringValue) - i = encodeVarintCommon(dAtA, i, uint64(len(m.StringValue))) - i-- - dAtA[i] = 0xa - return len(dAtA) 
- i, nil -} -func (m *AnyValue_BoolValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i-- - if m.BoolValue { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *AnyValue_IntValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintCommon(dAtA, i, uint64(m.IntValue)) - i-- - dAtA[i] = 0x18 - return len(dAtA) - i, nil -} -func (m *AnyValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) - i-- - dAtA[i] = 0x21 - return len(dAtA) - i, nil -} -func (m *AnyValue_ArrayValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ArrayValue != nil { - { - size, err := m.ArrayValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *AnyValue_KvlistValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_KvlistValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.KvlistValue != nil { - { - size, err := m.KvlistValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *AnyValue_BytesValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BytesValue != nil { - i -= len(m.BytesValue) - copy(dAtA[i:], m.BytesValue) - i = encodeVarintCommon(dAtA, i, uint64(len(m.BytesValue))) - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} -func (m *ArrayValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArrayValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *KeyValueList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValueList) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyValueList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *KeyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *InstrumentationScope) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InstrumentationScope) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InstrumentationScope) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DroppedAttributesCount != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x20 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *EntityRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EntityRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EntityRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DescriptionKeys) > 0 { - for iNdEx := len(m.DescriptionKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DescriptionKeys[iNdEx]) - copy(dAtA[i:], m.DescriptionKeys[iNdEx]) - i = encodeVarintCommon(dAtA, i, uint64(len(m.DescriptionKeys[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.IdKeys) > 0 { - for iNdEx := len(m.IdKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.IdKeys[iNdEx]) - copy(dAtA[i:], m.IdKeys[iNdEx]) - i = encodeVarintCommon(dAtA, i, 
uint64(len(m.IdKeys[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x12 - } - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintCommon(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintCommon(dAtA []byte, offset int, v uint64) int { - offset -= sovCommon(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AnyValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - n += m.Value.Size() - } - return n -} - -func (m *AnyValue_StringValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.StringValue) - n += 1 + l + sovCommon(uint64(l)) - return n -} -func (m *AnyValue_BoolValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - return n -} -func (m *AnyValue_IntValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovCommon(uint64(m.IntValue)) - return n -} -func (m *AnyValue_DoubleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *AnyValue_ArrayValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ArrayValue != nil { - l = m.ArrayValue.Size() - n += 1 + l + sovCommon(uint64(l)) - } - return n -} -func (m *AnyValue_KvlistValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.KvlistValue != nil { - l = m.KvlistValue.Size() - n += 1 + l + sovCommon(uint64(l)) - } - return n -} -func (m *AnyValue_BytesValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BytesValue != nil { - l = len(m.BytesValue) - n += 1 + l + sovCommon(uint64(l)) - } - return n -} -func (m *ArrayValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Values) > 0 { - for _, e := range m.Values { - l = e.Size() - n += 1 + l + sovCommon(uint64(l)) - } - } - return n -} - -func (m *KeyValueList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Values) > 0 { - for _, e := range m.Values { - l = e.Size() - n += 1 + l + sovCommon(uint64(l)) - } - } - return n -} - -func (m *KeyValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - l = m.Value.Size() - n += 1 + l + sovCommon(uint64(l)) - return n -} - -func (m *InstrumentationScope) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovCommon(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovCommon(uint64(m.DroppedAttributesCount)) - } - return n -} - -func (m *EntityRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - if len(m.IdKeys) > 0 { - for _, s := range m.IdKeys { - l = len(s) - n += 1 + l + sovCommon(uint64(l)) - } - } - if len(m.DescriptionKeys) > 0 { - for _, s := range 
m.DescriptionKeys { - l = len(s) - n += 1 + l + sovCommon(uint64(l)) - } - } - return n -} - -func sovCommon(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCommon(x uint64) (n int) { - return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *AnyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AnyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AnyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = &AnyValue_StringValue{string(dAtA[iNdEx:postIndex])} - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Value = &AnyValue_BoolValue{b} - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Value = &AnyValue_IntValue{v} - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = &AnyValue_DoubleValue{float64(math.Float64frombits(v))} - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ArrayValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &AnyValue_ArrayValue{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &KeyValueList{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &AnyValue_KvlistValue{v} - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := make([]byte, postIndex-iNdEx) - copy(v, dAtA[iNdEx:postIndex]) - m.Value = &AnyValue_BytesValue{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArrayValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArrayValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArrayValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, AnyValue{}) - if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KeyValueList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < 
l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValueList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValueList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, KeyValue{}) - if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KeyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InstrumentationScope) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InstrumentationScope: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InstrumentationScope: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EntityRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EntityRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EntityRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IdKeys = append(m.IdKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DescriptionKeys = append(m.DescriptionKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCommon(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCommon - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCommon - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCommon - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go deleted file mode 100644 index da266d167d2..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go +++ /dev/null @@ -1,1834 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/logs/v1/logs.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data" - v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
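// A minimal sketch, not part of the generated files: the wire-format rule
// the hand-rolled marshal/unmarshal/skip code in this diff relies on. A
// field key is the varint (field_number << 3) | wire_type, which is why
// skipCommon recovers the parts with wire>>3 and wire&0x7, and why
// sovCommon sizes a varint as (bits.Len64(x|1)+6)/7: 7 payload bits per
// byte, so for example 300 needs 9 bits and encodes in 2 bytes (0xac 0x02).
// splitKey is a hypothetical standalone helper.
func splitKey(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}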
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Possible values for LogRecord.SeverityNumber. -type SeverityNumber int32 - -const ( - // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. - SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0 - SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1 - SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2 - SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3 - SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4 - SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5 - SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6 - SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7 - SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8 - SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9 - SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10 - SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11 - SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12 - SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13 - SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14 - SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15 - SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16 - SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17 - SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18 - SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19 - SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20 - SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21 - SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22 - SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23 - SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24 -) - -var SeverityNumber_name = map[int32]string{ - 0: "SEVERITY_NUMBER_UNSPECIFIED", - 1: "SEVERITY_NUMBER_TRACE", - 2: "SEVERITY_NUMBER_TRACE2", - 3: "SEVERITY_NUMBER_TRACE3", - 4: "SEVERITY_NUMBER_TRACE4", - 5: "SEVERITY_NUMBER_DEBUG", - 6: "SEVERITY_NUMBER_DEBUG2", - 7: "SEVERITY_NUMBER_DEBUG3", - 8: "SEVERITY_NUMBER_DEBUG4", - 9: "SEVERITY_NUMBER_INFO", - 10: "SEVERITY_NUMBER_INFO2", - 11: "SEVERITY_NUMBER_INFO3", - 12: "SEVERITY_NUMBER_INFO4", - 13: "SEVERITY_NUMBER_WARN", - 14: "SEVERITY_NUMBER_WARN2", - 15: "SEVERITY_NUMBER_WARN3", - 16: "SEVERITY_NUMBER_WARN4", - 17: "SEVERITY_NUMBER_ERROR", - 18: "SEVERITY_NUMBER_ERROR2", - 19: "SEVERITY_NUMBER_ERROR3", - 20: "SEVERITY_NUMBER_ERROR4", - 21: "SEVERITY_NUMBER_FATAL", - 22: "SEVERITY_NUMBER_FATAL2", - 23: "SEVERITY_NUMBER_FATAL3", - 24: "SEVERITY_NUMBER_FATAL4", -} - -var SeverityNumber_value = map[string]int32{ - "SEVERITY_NUMBER_UNSPECIFIED": 0, - "SEVERITY_NUMBER_TRACE": 1, - "SEVERITY_NUMBER_TRACE2": 2, - "SEVERITY_NUMBER_TRACE3": 3, - "SEVERITY_NUMBER_TRACE4": 4, - "SEVERITY_NUMBER_DEBUG": 5, - "SEVERITY_NUMBER_DEBUG2": 6, - "SEVERITY_NUMBER_DEBUG3": 7, - "SEVERITY_NUMBER_DEBUG4": 8, - "SEVERITY_NUMBER_INFO": 9, - "SEVERITY_NUMBER_INFO2": 10, - "SEVERITY_NUMBER_INFO3": 11, - "SEVERITY_NUMBER_INFO4": 12, - "SEVERITY_NUMBER_WARN": 13, - "SEVERITY_NUMBER_WARN2": 14, - "SEVERITY_NUMBER_WARN3": 15, - "SEVERITY_NUMBER_WARN4": 16, - "SEVERITY_NUMBER_ERROR": 17, - "SEVERITY_NUMBER_ERROR2": 18, - "SEVERITY_NUMBER_ERROR3": 19, - "SEVERITY_NUMBER_ERROR4": 20, - "SEVERITY_NUMBER_FATAL": 21, - "SEVERITY_NUMBER_FATAL2": 22, - "SEVERITY_NUMBER_FATAL3": 23, - "SEVERITY_NUMBER_FATAL4": 24, -} - -func (x SeverityNumber) String() string { - return 
proto.EnumName(SeverityNumber_name, int32(x)) -} - -func (SeverityNumber) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{0} -} - -// LogRecordFlags represents constants used to interpret the -// LogRecord.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. To extract the bit-field, for example, use an -// expression like: -// -// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) -type LogRecordFlags int32 - -const ( - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - LogRecordFlags_LOG_RECORD_FLAGS_DO_NOT_USE LogRecordFlags = 0 - // Bits 0-7 are used for trace flags. - LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK LogRecordFlags = 255 -) - -var LogRecordFlags_name = map[int32]string{ - 0: "LOG_RECORD_FLAGS_DO_NOT_USE", - 255: "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK", -} - -var LogRecordFlags_value = map[string]int32{ - "LOG_RECORD_FLAGS_DO_NOT_USE": 0, - "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK": 255, -} - -func (x LogRecordFlags) String() string { - return proto.EnumName(LogRecordFlags_name, int32(x)) -} - -func (LogRecordFlags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{1} -} - -// LogsData represents the logs data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP logs data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -type LogsData struct { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - ResourceLogs []*ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` -} - -func (m *LogsData) Reset() { *m = LogsData{} } -func (m *LogsData) String() string { return proto.CompactTextString(m) } -func (*LogsData) ProtoMessage() {} -func (*LogsData) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{0} -} -func (m *LogsData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LogsData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LogsData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LogsData) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogsData.Merge(m, src) -} -func (m *LogsData) XXX_Size() int { - return m.Size() -} -func (m *LogsData) XXX_DiscardUnknown() { - xxx_messageInfo_LogsData.DiscardUnknown(m) -} - -var xxx_messageInfo_LogsData proto.InternalMessageInfo - -func (m *LogsData) GetResourceLogs() []*ResourceLogs { - if m != nil { - return m.ResourceLogs - } - return nil -} - -// A collection of ScopeLogs from a Resource. 
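// A minimal sketch, not part of the generated file, applying the mask the
// LogRecordFlags comment above prescribes: readers mask the fixed32 flags
// field instead of assuming the upper 24 bits are zero. traceFlags is a
// hypothetical helper.
func traceFlags(rec *LogRecord) uint8 {
	return uint8(rec.GetFlags() & uint32(LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK))
}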
-type ResourceLogs struct { - DeprecatedScopeLogs []*ScopeLogs `protobuf:"bytes,1000,rep,name=deprecated_scope_logs,json=deprecatedScopeLogs,proto3" json:"deprecated_scope_logs,omitempty"` - // The resource for the logs in this message. - // If this field is not set then resource info is unknown. - Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` - // A list of ScopeLogs that originate from a resource. - ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_logs" field which have their own schema_url field. - SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ResourceLogs) Reset() { *m = ResourceLogs{} } -func (m *ResourceLogs) String() string { return proto.CompactTextString(m) } -func (*ResourceLogs) ProtoMessage() {} -func (*ResourceLogs) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{1} -} -func (m *ResourceLogs) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceLogs) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceLogs.Merge(m, src) -} -func (m *ResourceLogs) XXX_Size() int { - return m.Size() -} -func (m *ResourceLogs) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceLogs.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo - -func (m *ResourceLogs) GetDeprecatedScopeLogs() []*ScopeLogs { - if m != nil { - return m.DeprecatedScopeLogs - } - return nil -} - -func (m *ResourceLogs) GetResource() v1.Resource { - if m != nil { - return m.Resource - } - return v1.Resource{} -} - -func (m *ResourceLogs) GetScopeLogs() []*ScopeLogs { - if m != nil { - return m.ScopeLogs - } - return nil -} - -func (m *ResourceLogs) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A collection of Logs produced by a Scope. -type ScopeLogs struct { - // The instrumentation scope information for the logs in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` - // A list of log records. - LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the log data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all logs in the "logs" field. 
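// A minimal sketch, not part of the generated file: the containment
// hierarchy these messages form (LogsData -> ResourceLogs -> ScopeLogs ->
// LogRecord), walked with the nil-safe getters. countRecords is a
// hypothetical helper.
func countRecords(ld *LogsData) int {
	n := 0
	for _, rl := range ld.GetResourceLogs() {
		for _, sl := range rl.GetScopeLogs() {
			n += len(sl.GetLogRecords())
		}
	}
	return n
}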
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ScopeLogs) Reset() { *m = ScopeLogs{} } -func (m *ScopeLogs) String() string { return proto.CompactTextString(m) } -func (*ScopeLogs) ProtoMessage() {} -func (*ScopeLogs) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{2} -} -func (m *ScopeLogs) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScopeLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ScopeLogs.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ScopeLogs) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScopeLogs.Merge(m, src) -} -func (m *ScopeLogs) XXX_Size() int { - return m.Size() -} -func (m *ScopeLogs) XXX_DiscardUnknown() { - xxx_messageInfo_ScopeLogs.DiscardUnknown(m) -} - -var xxx_messageInfo_ScopeLogs proto.InternalMessageInfo - -func (m *ScopeLogs) GetScope() v11.InstrumentationScope { - if m != nil { - return m.Scope - } - return v11.InstrumentationScope{} -} - -func (m *ScopeLogs) GetLogRecords() []*LogRecord { - if m != nil { - return m.LogRecords - } - return nil -} - -func (m *ScopeLogs) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A log record according to OpenTelemetry Log Data Model: -// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md -type LogRecord struct { - // time_unix_nano is the time when the event occurred. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // Time when the event was observed by the collection system. - // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) - // this timestamp is typically set at the generation time and is equal to Timestamp. - // For events originating externally and collected by OpenTelemetry (e.g. using - // Collector) this is the time when OpenTelemetry's code observed the event measured - // by the clock of the OpenTelemetry code. This field MUST be set once the event is - // observed by OpenTelemetry. - // - // For converting OpenTelemetry log data to formats that support only one timestamp or - // when receiving OpenTelemetry log data by recipients that support only one timestamp - // internally the following logic is recommended: - // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - ObservedTimeUnixNano uint64 `protobuf:"fixed64,11,opt,name=observed_time_unix_nano,json=observedTimeUnixNano,proto3" json:"observed_time_unix_nano,omitempty"` - // Numerical value of the severity, normalized to values described in Log Data Model. - // [Optional]. - SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"` - // The severity text (also known as log level). The original string representation as - // it is known at the source. [Optional]. 
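// A minimal sketch, not part of the generated file: the single-timestamp
// fallback recommended in the time_unix_nano comment above (use it when
// present, otherwise fall back to observed_time_unix_nano, with 0 meaning
// unset). effectiveTime is a hypothetical helper.
func effectiveTime(rec *LogRecord) uint64 {
	if t := rec.GetTimeUnixNano(); t != 0 {
		return t
	}
	return rec.GetObservedTimeUnixNano()
}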
- SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"` - // A value containing the body of the log record. Can be for example a human-readable - // string message (including multi-line) describing the event in a free form or it can - // be a structured data composed of arrays and maps of other values. [Optional]. - Body v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body"` - // Additional attributes that describe the specific event occurrence. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attributes []v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes"` - DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Flags, a bit field. 8 least significant bits are the trace flags as - // defined in W3C Trace Context specification. 24 most significant bits are reserved - // and must be set to 0. Readers must not assume that 24 most significant bits - // will be zero and must correctly mask the bits when reading 8-bit trace flag (use - // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. - Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"` - // A unique identifier for a trace. All logs from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. - // - // The receivers SHOULD assume that the log record is not associated with a - // trace if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. - TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. If the sender specifies a valid span_id then it SHOULD also - // specify a valid trace_id. - // - // The receivers SHOULD assume that the log record is not associated with a - // span if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. - SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` - // A unique identifier of event category/type. - // All events with the same event_name are expected to conform to the same - // schema for both their attributes and their body. - // - // Recommended to be fully qualified and short (no longer than 256 characters). - // - // Presence of event_name on the log record identifies this record - // as an event. - // - // [Optional]. 
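// A minimal sketch, not part of the generated file: the validity rule the
// trace_id/span_id comments above state, namely that an id of the wrong
// length or consisting of all zeroes is invalid. TraceID and SpanID are
// custom pdata types here, so this hypothetical helper checks raw bytes
// instead (16 for a trace id, 8 for a span id).
func idValid(id []byte, wantLen int) bool {
	if len(id) != wantLen {
		return false
	}
	for _, b := range id {
		if b != 0 {
			return true
		}
	}
	return false
}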
- EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"` -} - -func (m *LogRecord) Reset() { *m = LogRecord{} } -func (m *LogRecord) String() string { return proto.CompactTextString(m) } -func (*LogRecord) ProtoMessage() {} -func (*LogRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{3} -} -func (m *LogRecord) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LogRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogRecord.Merge(m, src) -} -func (m *LogRecord) XXX_Size() int { - return m.Size() -} -func (m *LogRecord) XXX_DiscardUnknown() { - xxx_messageInfo_LogRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_LogRecord proto.InternalMessageInfo - -func (m *LogRecord) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *LogRecord) GetObservedTimeUnixNano() uint64 { - if m != nil { - return m.ObservedTimeUnixNano - } - return 0 -} - -func (m *LogRecord) GetSeverityNumber() SeverityNumber { - if m != nil { - return m.SeverityNumber - } - return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED -} - -func (m *LogRecord) GetSeverityText() string { - if m != nil { - return m.SeverityText - } - return "" -} - -func (m *LogRecord) GetBody() v11.AnyValue { - if m != nil { - return m.Body - } - return v11.AnyValue{} -} - -func (m *LogRecord) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *LogRecord) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *LogRecord) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -func (m *LogRecord) GetEventName() string { - if m != nil { - return m.EventName - } - return "" -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value) - proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value) - proto.RegisterType((*LogsData)(nil), "opentelemetry.proto.logs.v1.LogsData") - proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs") - proto.RegisterType((*ScopeLogs)(nil), "opentelemetry.proto.logs.v1.ScopeLogs") - proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e) -} - -var fileDescriptor_d1c030a3ec7e961e = []byte{ - // 971 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0x41, 0x6f, 0xe2, 0x46, - 0x1b, 0xc7, 0x71, 0x12, 0x02, 0x4c, 0x08, 0x3b, 0xef, 0x2c, 0xc9, 0xfa, 0x4d, 0x54, 0x42, 0xd3, - 0x2a, 0xa5, 0xa9, 0x04, 0x0a, 0x50, 0x69, 0x7b, 0xab, 0x09, 0x26, 0xa2, 0x21, 0x10, 0x0d, 0x90, - 0x2a, 0xdb, 0x4a, 0x96, 0xc1, 0x53, 0x6a, 0xc9, 0xcc, 0x58, 0xf6, 0x80, 0x92, 0x6f, 0xd1, 0x4f, - 0xd0, 0x4b, 0x0f, 0x95, 0xfa, 0x35, 0xda, 0xc3, 0x1e, 0xf7, 0x58, 0xf5, 0xb0, 0xaa, 0x92, 0x4b, - 0xbf, 0x45, 0xab, 0x19, 0x0c, 0x21, 0xa9, 0x9d, 0x34, 0x27, 0x66, 0x9e, 0xdf, 0xff, 0xf9, 0x3f, - 0xcf, 0x78, 0xc6, 0x83, 0xc1, 0x01, 0x73, 0x09, 0xe5, 0xc4, 
0x21, 0x63, 0xc2, 0xbd, 0xeb, 0x92, - 0xeb, 0x31, 0xce, 0x4a, 0x0e, 0x1b, 0xf9, 0xa5, 0xe9, 0x91, 0xfc, 0x2d, 0xca, 0x10, 0xda, 0xbd, - 0xa7, 0x9b, 0x05, 0x8b, 0x92, 0x4f, 0x8f, 0x76, 0xb2, 0x23, 0x36, 0x62, 0xb3, 0x54, 0x31, 0x9a, - 0xd1, 0x9d, 0xc3, 0x30, 0xeb, 0x21, 0x1b, 0x8f, 0x19, 0x15, 0xe6, 0xb3, 0x51, 0xa0, 0x2d, 0x86, - 0x69, 0x3d, 0xe2, 0xb3, 0x89, 0x37, 0x24, 0x42, 0x3d, 0x1f, 0xcf, 0xf4, 0xfb, 0x6f, 0x40, 0xb2, - 0xc5, 0x46, 0x7e, 0xdd, 0xe4, 0x26, 0x6a, 0x83, 0xcd, 0x39, 0x35, 0x44, 0x47, 0xaa, 0x92, 0x5f, - 0x2d, 0x6c, 0x94, 0x3f, 0x2d, 0x3e, 0xd2, 0x72, 0x11, 0x07, 0x19, 0xc2, 0x05, 0xa7, 0xbd, 0xa5, - 0xd9, 0xfe, 0x8f, 0x2b, 0x20, 0xbd, 0x8c, 0xd1, 0x37, 0x60, 0xcb, 0x22, 0xae, 0x47, 0x86, 0x26, - 0x27, 0x96, 0xe1, 0x0f, 0x99, 0x1b, 0x14, 0xfa, 0x2b, 0x21, 0x2b, 0x1d, 0x3c, 0x5a, 0xa9, 0x2b, - 0xf4, 0xb2, 0xcc, 0xcb, 0x3b, 0x97, 0x45, 0x10, 0x9d, 0x82, 0xe4, 0xbc, 0xba, 0xaa, 0xe4, 0x95, - 0xc8, 0xc6, 0x17, 0x0f, 0x60, 0xa9, 0xf9, 0xda, 0xda, 0xdb, 0xf7, 0x7b, 0x31, 0xbc, 0x30, 0x40, - 0x3a, 0x00, 0x4b, 0xed, 0xad, 0x3c, 0xab, 0xbb, 0x94, 0xbf, 0xe8, 0xe9, 0x03, 0x61, 0xf3, 0x3d, - 0x19, 0x9b, 0xc6, 0xc4, 0x73, 0xd4, 0xd5, 0xbc, 0x52, 0x48, 0x09, 0x2c, 0x22, 0x7d, 0xcf, 0xd9, - 0xff, 0x4d, 0x01, 0xa9, 0xbb, 0x05, 0x74, 0x40, 0x5c, 0x66, 0x06, 0xdd, 0x57, 0x42, 0xcb, 0x05, - 0x9b, 0x3d, 0x3d, 0x2a, 0x36, 0xa9, 0xcf, 0xbd, 0xc9, 0x98, 0x50, 0x6e, 0x72, 0x9b, 0x51, 0xe9, - 0x13, 0xac, 0x63, 0xe6, 0x83, 0x4e, 0xc0, 0x86, 0xc3, 0x46, 0x86, 0x47, 0x86, 0xcc, 0xb3, 0xfe, - 0xdb, 0x2a, 0x5a, 0x6c, 0x84, 0xa5, 0x1c, 0x03, 0x67, 0x3e, 0x7c, 0x72, 0x19, 0x3f, 0xc5, 0x41, - 0x6a, 0x91, 0x88, 0x3e, 0x06, 0x19, 0x6e, 0x8f, 0x89, 0x31, 0xa1, 0xf6, 0x95, 0x41, 0x4d, 0xca, - 0xe4, 0x7a, 0xd6, 0x71, 0x5a, 0x44, 0xfb, 0xd4, 0xbe, 0x6a, 0x9b, 0x94, 0xa1, 0xcf, 0xc1, 0x2b, - 0x36, 0xf0, 0x89, 0x37, 0x25, 0x96, 0xf1, 0x40, 0xbe, 0x21, 0xe5, 0xd9, 0x39, 0xee, 0x2d, 0xa7, - 0xf5, 0xc0, 0x0b, 0x9f, 0x4c, 0x89, 0x67, 0xf3, 0x6b, 0x83, 0x4e, 0xc6, 0x03, 0xe2, 0xa9, 0x2b, - 0x79, 0xa5, 0x90, 0x29, 0x7f, 0xf6, 0xf8, 0xe6, 0x04, 0x39, 0x6d, 0x99, 0x82, 0x33, 0xfe, 0xbd, - 0x39, 0xfa, 0x08, 0x6c, 0x2e, 0x5c, 0x39, 0xb9, 0xe2, 0xc1, 0x12, 0xd3, 0xf3, 0x60, 0x8f, 0x5c, - 0x71, 0xa4, 0x81, 0xb5, 0x01, 0xb3, 0xae, 0xd5, 0xb8, 0xdc, 0x9d, 0x4f, 0x9e, 0xd8, 0x1d, 0x8d, - 0x5e, 0x5f, 0x98, 0xce, 0x64, 0xbe, 0x23, 0x32, 0x15, 0x9d, 0x01, 0x60, 0x72, 0xee, 0xd9, 0x83, - 0x09, 0x27, 0xbe, 0xba, 0x2e, 0xf7, 0xe3, 0x29, 0xa3, 0x53, 0x72, 0xcf, 0x68, 0xc9, 0x00, 0xbd, - 0x06, 0xaa, 0xe5, 0x31, 0xd7, 0x25, 0x96, 0x71, 0x17, 0x35, 0x86, 0x6c, 0x42, 0xb9, 0x9a, 0xc8, - 0x2b, 0x85, 0x4d, 0xbc, 0x1d, 0x70, 0x6d, 0x81, 0x8f, 0x05, 0x45, 0x59, 0x10, 0xff, 0xce, 0x31, - 0x47, 0xbe, 0x9a, 0xcc, 0x2b, 0x85, 0x04, 0x9e, 0x4d, 0xd0, 0xb7, 0x20, 0xc9, 0x3d, 0x73, 0x48, - 0x0c, 0xdb, 0x52, 0x53, 0x79, 0xa5, 0x90, 0xae, 0x69, 0xa2, 0xe6, 0x1f, 0xef, 0xf7, 0xbe, 0x18, - 0xb1, 0x07, 0x6d, 0xda, 0xe2, 0x06, 0x72, 0x1c, 0x32, 0xe4, 0xcc, 0x2b, 0xb9, 0x96, 0xc9, 0xcd, - 0x92, 0x4d, 0x39, 0xf1, 0xa8, 0xe9, 0x94, 0xc4, 0xac, 0xd8, 0x13, 0x4e, 0xcd, 0x3a, 0x4e, 0x48, - 0xcb, 0xa6, 0x85, 0x2e, 0x41, 0xc2, 0x77, 0x4d, 0x2a, 0xcc, 0x81, 0x34, 0xff, 0x32, 0x30, 0x7f, - 0xfd, 0x7c, 0xf3, 0xae, 0x6b, 0xd2, 0x66, 0x1d, 0xaf, 0x0b, 0xc3, 0xa6, 0x25, 0xce, 0x27, 0x99, - 0x12, 0xca, 0x0d, 0x6a, 0x8e, 0x89, 0x9a, 0x9e, 0x9d, 0x4f, 0x19, 0x69, 0x9b, 0x63, 0xf2, 0xd5, - 0x5a, 0x72, 0x0d, 0xc6, 0x0f, 0x7f, 0x8d, 0x83, 0xcc, 0xfd, 0x73, 0x80, 0xf6, 0xc0, 0x6e, 0x57, - 0xbf, 0xd0, 0x71, 0xb3, 0x77, 0x69, 0xb4, 0xfb, 0x67, 0x35, 0x1d, 0x1b, 0xfd, 0x76, 
0xf7, 0x5c, - 0x3f, 0x6e, 0x36, 0x9a, 0x7a, 0x1d, 0xc6, 0xd0, 0xff, 0xc1, 0xd6, 0x43, 0x41, 0x0f, 0x6b, 0xc7, - 0x3a, 0x54, 0xd0, 0x0e, 0xd8, 0x0e, 0x45, 0x65, 0xb8, 0x12, 0xc9, 0x2a, 0x70, 0x35, 0x92, 0x55, - 0xe1, 0x5a, 0x58, 0xb9, 0xba, 0x5e, 0xeb, 0x9f, 0xc0, 0x78, 0x58, 0x9a, 0x44, 0x65, 0xb8, 0x1e, - 0xc9, 0x2a, 0x30, 0x11, 0xc9, 0xaa, 0x30, 0x89, 0x54, 0x90, 0x7d, 0xc8, 0x9a, 0xed, 0x46, 0x07, - 0xa6, 0xc2, 0x1a, 0x11, 0xa4, 0x0c, 0x41, 0x14, 0xaa, 0xc0, 0x8d, 0x28, 0x54, 0x85, 0xe9, 0xb0, - 0x52, 0x5f, 0x6b, 0xb8, 0x0d, 0x37, 0xc3, 0x92, 0x04, 0x29, 0xc3, 0x4c, 0x14, 0xaa, 0xc0, 0x17, - 0x51, 0xa8, 0x0a, 0x61, 0x18, 0xd2, 0x31, 0xee, 0x60, 0xf8, 0xbf, 0xb0, 0x87, 0x21, 0x51, 0x19, - 0xa2, 0x48, 0x56, 0x81, 0x2f, 0x23, 0x59, 0x15, 0x66, 0xc3, 0xca, 0x35, 0xb4, 0x9e, 0xd6, 0x82, - 0x5b, 0x61, 0x69, 0x12, 0x95, 0xe1, 0x76, 0x24, 0xab, 0xc0, 0x57, 0x91, 0xac, 0x0a, 0xd5, 0xc3, - 0x4b, 0x90, 0x59, 0x5c, 0xb5, 0x0d, 0xf9, 0xd6, 0xee, 0x81, 0xdd, 0x56, 0xe7, 0xc4, 0xc0, 0xfa, - 0x71, 0x07, 0xd7, 0x8d, 0x46, 0x4b, 0x3b, 0xe9, 0x1a, 0xf5, 0x8e, 0xd1, 0xee, 0xf4, 0x8c, 0x7e, - 0x57, 0x87, 0x31, 0x74, 0x00, 0x3e, 0xfc, 0x97, 0x40, 0x1e, 0xb9, 0x60, 0x7c, 0xa6, 0x75, 0x4f, - 0xe1, 0xdf, 0x4a, 0xed, 0x67, 0xe5, 0xed, 0x4d, 0x4e, 0x79, 0x77, 0x93, 0x53, 0xfe, 0xbc, 0xc9, - 0x29, 0x3f, 0xdc, 0xe6, 0x62, 0xef, 0x6e, 0x73, 0xb1, 0xdf, 0x6f, 0x73, 0x31, 0x90, 0xb3, 0xd9, - 0x63, 0xf7, 0x6b, 0x4d, 0x5c, 0xff, 0xfe, 0xb9, 0x08, 0x9d, 0x2b, 0x6f, 0x6a, 0xcf, 0x7e, 0x9f, - 0x67, 0x9f, 0x29, 0x23, 0x42, 0xe7, 0x1f, 0x4c, 0xbf, 0xac, 0xec, 0x76, 0x5c, 0x42, 0x7b, 0x0b, - 0x07, 0xe9, 0x2d, 0xfe, 0x9d, 0xfc, 0xe2, 0xc5, 0xd1, 0x60, 0x5d, 0xea, 0x2b, 0xff, 0x04, 0x00, - 0x00, 0xff, 0xff, 0xc9, 0xbc, 0x36, 0x44, 0x74, 0x09, 0x00, 0x00, -} - -func (m *LogsData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LogsData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LogsData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceLogs) > 0 { - for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResourceLogs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceLogs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DeprecatedScopeLogs) > 0 { - for iNdEx := len(m.DeprecatedScopeLogs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DeprecatedScopeLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3e - i-- - dAtA[i] = 0xc2 - } - } - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintLogs(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.ScopeLogs) > 0 { - for 
iNdEx := len(m.ScopeLogs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ScopeLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ScopeLogs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScopeLogs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScopeLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintLogs(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.LogRecords) > 0 { - for iNdEx := len(m.LogRecords) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.LogRecords[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LogRecord) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LogRecord) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LogRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.EventName) > 0 { - i -= len(m.EventName) - copy(dAtA[i:], m.EventName) - i = encodeVarintLogs(dAtA, i, uint64(len(m.EventName))) - i-- - dAtA[i] = 0x62 - } - if m.ObservedTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ObservedTimeUnixNano)) - i-- - dAtA[i] = 0x59 - } - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - if m.Flags != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags)) - i-- - dAtA[i] = 0x45 - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintLogs(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x38 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - { - size, err := m.Body.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if len(m.SeverityText) > 0 { - i -= 
len(m.SeverityText) - copy(dAtA[i:], m.SeverityText) - i = encodeVarintLogs(dAtA, i, uint64(len(m.SeverityText))) - i-- - dAtA[i] = 0x1a - } - if m.SeverityNumber != 0 { - i = encodeVarintLogs(dAtA, i, uint64(m.SeverityNumber)) - i-- - dAtA[i] = 0x10 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func encodeVarintLogs(dAtA []byte, offset int, v uint64) int { - offset -= sovLogs(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *LogsData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceLogs) > 0 { - for _, e := range m.ResourceLogs { - l = e.Size() - n += 1 + l + sovLogs(uint64(l)) - } - } - return n -} - -func (m *ResourceLogs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovLogs(uint64(l)) - if len(m.ScopeLogs) > 0 { - for _, e := range m.ScopeLogs { - l = e.Size() - n += 1 + l + sovLogs(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovLogs(uint64(l)) - } - if len(m.DeprecatedScopeLogs) > 0 { - for _, e := range m.DeprecatedScopeLogs { - l = e.Size() - n += 2 + l + sovLogs(uint64(l)) - } - } - return n -} - -func (m *ScopeLogs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Scope.Size() - n += 1 + l + sovLogs(uint64(l)) - if len(m.LogRecords) > 0 { - for _, e := range m.LogRecords { - l = e.Size() - n += 1 + l + sovLogs(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovLogs(uint64(l)) - } - return n -} - -func (m *LogRecord) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TimeUnixNano != 0 { - n += 9 - } - if m.SeverityNumber != 0 { - n += 1 + sovLogs(uint64(m.SeverityNumber)) - } - l = len(m.SeverityText) - if l > 0 { - n += 1 + l + sovLogs(uint64(l)) - } - l = m.Body.Size() - n += 1 + l + sovLogs(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovLogs(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovLogs(uint64(m.DroppedAttributesCount)) - } - if m.Flags != 0 { - n += 5 - } - l = m.TraceId.Size() - n += 1 + l + sovLogs(uint64(l)) - l = m.SpanId.Size() - n += 1 + l + sovLogs(uint64(l)) - if m.ObservedTimeUnixNano != 0 { - n += 9 - } - l = len(m.EventName) - if l > 0 { - n += 1 + l + sovLogs(uint64(l)) - } - return n -} - -func sovLogs(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLogs(x uint64) (n int) { - return sovLogs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *LogsData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LogsData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LogsData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType) - } - var msglen int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceLogs = append(m.ResourceLogs, &ResourceLogs{}) - if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogs(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceLogs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceLogs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceLogs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScopeLogs = append(m.ScopeLogs, &ScopeLogs{}) - if err := m.ScopeLogs[len(m.ScopeLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 1000: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeLogs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedScopeLogs = append(m.DeprecatedScopeLogs, &ScopeLogs{}) - if err := m.DeprecatedScopeLogs[len(m.DeprecatedScopeLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogs(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScopeLogs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopeLogs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScopeLogs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LogRecords = append(m.LogRecords, &LogRecord{}) - if err := m.LogRecords[len(m.LogRecords)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogs(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LogRecord) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LogRecord: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LogRecord: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType) - } - m.SeverityNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SeverityNumber |= SeverityNumber(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SeverityText = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Body.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) - } - m.Flags = 0 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeUnixNano", wireType) - } - m.ObservedTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.ObservedTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EventName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogs(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLogs(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLogs - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLogs - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLogs - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLogs = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLogs = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLogs = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go deleted file mode 100644 index 719652cceb2..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go +++ /dev/null @@ -1,6695 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/metrics/v1/metrics.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data" - v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
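The skipLogs helper that closes the deleted logs.pb.go above, like every Unmarshal case before it, inlines the same base-128 varint loop. Pulled out on its own (readVarint is a hypothetical name, not generated code), the mechanics are easier to see; the trailing example decodes the 0xa key byte the marshal functions write for field 1:

```go
package main

import (
	"errors"
	"fmt"
)

// readVarint decodes one base-128 varint, the loop the generated
// Unmarshal and skipLogs functions inline at every field boundary.
func readVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil // high bit clear: last byte of the varint
		}
	}
	return 0, 0, errors.New("varint overflow")
}

func main() {
	// Key for field 1 with wire type 2 (length-delimited):
	// (1 << 3) | 2 = 0x0A, the 0xa byte the marshal code writes.
	key, _, _ := readVarint([]byte{0x0A})
	fmt.Println("field:", key>>3, "wire type:", key&0x7)
}
```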
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// AggregationTemporality defines how a metric aggregator reports aggregated -// values. It describes how those values relate to the time interval over -// which they are aggregated. -type AggregationTemporality int32 - -const ( - // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. - AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 - // DELTA is an AggregationTemporality for a metric aggregator which reports - // changes since last report time. Successive metrics contain aggregation of - // values from continuous and non-overlapping intervals. - // - // The values for a DELTA metric are based only on the time interval - // associated with one measurement cycle. There is no dependency on - // previous measurements like is the case for CUMULATIVE metrics. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // DELTA metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0+1 to - // t_0+2 with a value of 2. - AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 - // CUMULATIVE is an AggregationTemporality for a metric aggregator which - // reports changes since a fixed start time. This means that current values - // of a CUMULATIVE metric depend on all previous measurements since the - // start time. Because of this, the sender is required to retain this state - // in some form. If this state is lost or invalidated, the CUMULATIVE metric - // values MUST be reset and a new fixed start time following the last - // reported measurement time sent MUST be used. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // CUMULATIVE metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+2 with a value of 5. - // 9. The system experiences a fault and loses state. - // 10. The system recovers and resumes receiving at time=t_1. - // 11. A request is received, the system measures 1 request. - // 12. The 1 second collection cycle ends. 
A metric is exported for the - // number of requests received over the interval of time t_1 to - // t_0+1 with a value of 1. - // - // Note: Even though, when reporting changes since last report time, using - // CUMULATIVE is valid, it is not recommended. This may cause problems for - // systems that do not use start_time to determine when the aggregation - // value was reset (e.g. Prometheus). - AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 -) - -var AggregationTemporality_name = map[int32]string{ - 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", - 1: "AGGREGATION_TEMPORALITY_DELTA", - 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", -} - -var AggregationTemporality_value = map[string]int32{ - "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, - "AGGREGATION_TEMPORALITY_DELTA": 1, - "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, -} - -func (x AggregationTemporality) String() string { - return proto.EnumName(AggregationTemporality_name, int32(x)) -} - -func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{0} -} - -// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a -// bit-field representing 32 distinct boolean flags. Each flag defined in this -// enum is a bit-mask. To test the presence of a single flag in the flags of -// a data point, for example, use an expression like: -// -// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK -type DataPointFlags int32 - -const ( - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE DataPointFlags = 0 - // This DataPoint is valid but has no recorded value. This value - // SHOULD be used to reflect explicitly missing data in a series, as - // for an equivalent to the Prometheus "staleness marker". - DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK DataPointFlags = 1 -) - -var DataPointFlags_name = map[int32]string{ - 0: "DATA_POINT_FLAGS_DO_NOT_USE", - 1: "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK", -} - -var DataPointFlags_value = map[string]int32{ - "DATA_POINT_FLAGS_DO_NOT_USE": 0, - "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK": 1, -} - -func (x DataPointFlags) String() string { - return proto.EnumName(DataPointFlags_name, int32(x)) -} - -func (DataPointFlags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{1} -} - -// MetricsData represents the metrics data that can be stored in a persistent -// storage, OR can be embedded by other protocols that transfer OTLP metrics -// data but do not implement the OTLP protocol. -// -// MetricsData -// └─── ResourceMetrics -// -// ├── Resource -// ├── SchemaURL -// └── ScopeMetrics -// ├── Scope -// ├── SchemaURL -// └── Metric -// ├── Name -// ├── Description -// ├── Unit -// └── data -// ├── Gauge -// ├── Sum -// ├── Histogram -// ├── ExponentialHistogram -// └── Summary -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -type MetricsData struct { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain - // one element. 
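The DELTA and CUMULATIVE walk-throughs above use the same request counts; a short simulation under those numbers (plain Go, no generated types involved) makes the difference concrete:

```go
package main

import "fmt"

func main() {
	// Requests measured in each one-second cycle, per the enum comments
	// above: 3 in the first cycle, 2 in the second.
	perCycle := []uint64{3, 2}

	var cumulative uint64
	for i, n := range perCycle {
		cumulative += n
		// DELTA reports only the interval's own count; CUMULATIVE reports
		// the running total since the fixed start time t_0.
		fmt.Printf("cycle %d: delta=%d cumulative=%d\n", i+1, n, cumulative)
	}
	// Output:
	// cycle 1: delta=3 cumulative=3
	// cycle 2: delta=2 cumulative=5
}
```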
Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` -} - -func (m *MetricsData) Reset() { *m = MetricsData{} } -func (m *MetricsData) String() string { return proto.CompactTextString(m) } -func (*MetricsData) ProtoMessage() {} -func (*MetricsData) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{0} -} -func (m *MetricsData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsData) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsData.Merge(m, src) -} -func (m *MetricsData) XXX_Size() int { - return m.Size() -} -func (m *MetricsData) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsData.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsData proto.InternalMessageInfo - -func (m *MetricsData) GetResourceMetrics() []*ResourceMetrics { - if m != nil { - return m.ResourceMetrics - } - return nil -} - -// A collection of ScopeMetrics from a Resource. -type ResourceMetrics struct { - DeprecatedScopeMetrics []*ScopeMetrics `protobuf:"bytes,1000,rep,name=deprecated_scope_metrics,json=deprecatedScopeMetrics,proto3" json:"deprecated_scope_metrics,omitempty"` - // The resource for the metrics in this message. - // If this field is not set then no resource info is known. - Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` - // A list of metrics that originate from a resource. - ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_metrics" field which have their own schema_url field. 
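deprecated_scope_metrics above sits at field number 1000, which no longer fits a one-byte key. A hypothetical encodeKey helper reproduces the two hard-coded bytes the deleted marshal code writes for such fields; compare the 0x3e/0xc2 pair emitted for DeprecatedScopeLogs earlier in this diff, stored in reverse because MarshalToSizedBuffer fills the buffer back to front:

```go
package main

import "fmt"

// encodeKey builds the varint key for a protobuf field, the value the
// generated marshal code writes as hard-coded bytes.
func encodeKey(fieldNum, wireType uint64) []byte {
	v := fieldNum<<3 | wireType
	var out []byte
	for v >= 0x80 {
		out = append(out, byte(v&0x7F)|0x80) // 7 payload bits + continuation bit
		v >>= 7
	}
	return append(out, byte(v))
}

func main() {
	// deprecated_scope_metrics is field 1000, wire type 2
	// (length-delimited), so its key is (1000<<3)|2 = 8002.
	fmt.Printf("% x\n", encodeKey(1000, 2)) // c2 3e
}
```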
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} } -func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) } -func (*ResourceMetrics) ProtoMessage() {} -func (*ResourceMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{1} -} -func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetrics.Merge(m, src) -} -func (m *ResourceMetrics) XXX_Size() int { - return m.Size() -} -func (m *ResourceMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo - -func (m *ResourceMetrics) GetDeprecatedScopeMetrics() []*ScopeMetrics { - if m != nil { - return m.DeprecatedScopeMetrics - } - return nil -} - -func (m *ResourceMetrics) GetResource() v1.Resource { - if m != nil { - return m.Resource - } - return v1.Resource{} -} - -func (m *ResourceMetrics) GetScopeMetrics() []*ScopeMetrics { - if m != nil { - return m.ScopeMetrics - } - return nil -} - -func (m *ResourceMetrics) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A collection of Metrics produced by an Scope. -type ScopeMetrics struct { - // The instrumentation scope information for the metrics in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` - // A list of metrics that originate from an instrumentation library. - Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all metrics in the "metrics" field. 
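Both schema_url comments note that the version number is the last segment of the URL path. A deliberately small sketch of extracting it; schemaVersion is a hypothetical helper, and real consumers should parse and validate the URL properly:

```go
package main

import (
	"fmt"
	"strings"
)

// schemaVersion returns the trailing path segment, which the comments
// above identify as the schema's version number.
func schemaVersion(schemaURL string) string {
	i := strings.LastIndex(schemaURL, "/")
	if i < 0 {
		return ""
	}
	return schemaURL[i+1:]
}

func main() {
	fmt.Println(schemaVersion("https://opentelemetry.io/schemas/1.21.0")) // 1.21.0
}
```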
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ScopeMetrics) Reset() { *m = ScopeMetrics{} } -func (m *ScopeMetrics) String() string { return proto.CompactTextString(m) } -func (*ScopeMetrics) ProtoMessage() {} -func (*ScopeMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{2} -} -func (m *ScopeMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScopeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ScopeMetrics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ScopeMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScopeMetrics.Merge(m, src) -} -func (m *ScopeMetrics) XXX_Size() int { - return m.Size() -} -func (m *ScopeMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_ScopeMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_ScopeMetrics proto.InternalMessageInfo - -func (m *ScopeMetrics) GetScope() v11.InstrumentationScope { - if m != nil { - return m.Scope - } - return v11.InstrumentationScope{} -} - -func (m *ScopeMetrics) GetMetrics() []*Metric { - if m != nil { - return m.Metrics - } - return nil -} - -func (m *ScopeMetrics) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// Defines a Metric which has one or more timeseries. The following is a -// brief summary of the Metric data model. For more details, see: -// -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md -// -// The data model and relation between entities is shown in the -// diagram below. Here, "DataPoint" is the term used to refer to any -// one of the specific data point value types, and "points" is the term used -// to refer to any one of the lists of points contained in the Metric. -// -// - Metric is composed of a metadata and data. -// -// - Metadata part contains a name, description, unit. -// -// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). -// -// - DataPoint contains timestamps, attributes, and one of the possible value type -// fields. -// -// Metric -// +------------+ -// |name | -// |description | -// |unit | +------------------------------------+ -// |data |---> |Gauge, Sum, Histogram, Summary, ... | -// +------------+ +------------------------------------+ -// -// Data [One of Gauge, Sum, Histogram, Summary, ...] -// +-----------+ -// |... | // Metadata about the Data. -// |points |--+ -// +-----------+ | -// | +---------------------------+ -// | |DataPoint 1 | -// v |+------+------+ +------+ | -// +-----+ ||label |label |...|label | | -// | 1 |-->||value1|value2|...|valueN| | -// +-----+ |+------+------+ +------+ | -// | . | |+-----+ | -// | . | ||value| | -// | . | |+-----+ | -// | . | +---------------------------+ -// | . | . -// | . | . -// | . | . -// | . | +---------------------------+ -// | . | |DataPoint M | -// +-----+ |+------+------+ +------+ | -// | M |-->||label |label |...|label | | -// +-----+ ||value1|value2|...|valueN| | -// |+------+------+ +------+ | -// |+-----+ | -// ||value| | -// |+-----+ | -// +---------------------------+ -// -// Each distinct type of DataPoint represents the output of a specific -// aggregation function, the result of applying the DataPoint's -// associated function of to one or more measurements. 
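The diagram above reduces every point to an attribute set plus a value at a timestamp. A toy dataPoint struct captures that shape; it is not the generated NumberDataPoint, just an illustration of what each box in the diagram holds:

```go
package main

import "fmt"

// dataPoint sketches the shape drawn in the diagram above: attribute
// key/values identifying the series, a capture time, and one value.
type dataPoint struct {
	Attributes map[string]string
	TimeNano   uint64
	Value      float64
}

func main() {
	p := dataPoint{
		Attributes: map[string]string{"http.method": "GET", "status": "200"},
		TimeNano:   1_700_000_000_000_000_000,
		Value:      42,
	}
	fmt.Printf("%v @%d = %g\n", p.Attributes, p.TimeNano, p.Value)
}
```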
-// - // All DataPoint types have three common fields: - // - Attributes includes key-value pairs associated with the data point - // - TimeUnixNano is required, set to the end time of the aggregation - // - StartTimeUnixNano is optional, but strongly encouraged for DataPoints - // having an AggregationTemporality field, as discussed below. - // - // Both TimeUnixNano and StartTimeUnixNano values are expressed as - // UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // # TimeUnixNano - // - // This field is required, having consistent interpretation across - // DataPoint types. TimeUnixNano is the moment corresponding to when - // the data point's aggregate value was captured. - // - // Data points with the 0 value for TimeUnixNano SHOULD be rejected - // by consumers. - // - // # StartTimeUnixNano - // - // StartTimeUnixNano in general allows detecting when a sequence of - // observations is unbroken. This field indicates to consumers the - // start time for points with cumulative and delta - // AggregationTemporality, and it should be included whenever possible - // to support correct rate calculation. Although it may be omitted - // when the start time is truly unknown, setting StartTimeUnixNano is - // strongly encouraged. -type Metric struct { - // name of the metric. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // description of the metric, which can be used in documentation. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // unit in which the metric value is reported. Follows the format - // described by https://unitsofmeasure.org/ucum.html. - Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` - // Data determines the aggregation type (if any) of the metric, what is the - // reported value type for the data points, as well as the relationship to - // the time interval over which they are reported. - // - // Types that are valid to be assigned to Data: - // *Metric_Gauge - // *Metric_Sum - // *Metric_Histogram - // *Metric_ExponentialHistogram - // *Metric_Summary - Data isMetric_Data `protobuf_oneof:"data"` - // Additional metadata attributes that describe the metric. [Optional]. - // Attributes are non-identifying. - // Consumers SHOULD NOT need to be aware of these attributes. - // These attributes MAY be used to encode information allowing - // for lossless roundtrip translation to / from another data model. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key).
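The StartTimeUnixNano guidance above exists mostly so consumers can compute rates safely. A sketch of that calculation, refusing to divide when the start time is the unknown zero value; ratePerSecond is a hypothetical helper, not part of this package:

```go
package main

import (
	"fmt"
	"time"
)

// ratePerSecond derives a per-second rate from a cumulative point. The
// calculation is only sound when StartTimeUnixNano is set, which is why
// the comments above encourage including it whenever possible.
func ratePerSecond(value float64, startNano, timeNano uint64) (float64, bool) {
	if startNano == 0 || timeNano <= startNano {
		return 0, false // unknown start time: no sound rate calculation
	}
	elapsed := time.Duration(timeNano - startNano).Seconds()
	return value / elapsed, true
}

func main() {
	start := uint64(1_700_000_000_000_000_000)
	end := start + uint64(10*time.Second)
	if r, ok := ratePerSecond(50, start, end); ok {
		fmt.Printf("%.1f req/s\n", r) // 5.0 req/s
	}
}
```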
- Metadata []v11.KeyValue `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{3} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return m.Size() -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -type isMetric_Data interface { - isMetric_Data() - MarshalTo([]byte) (int, error) - Size() int -} - -type Metric_Gauge struct { - Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof" json:"gauge,omitempty"` -} -type Metric_Sum struct { - Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof" json:"sum,omitempty"` -} -type Metric_Histogram struct { - Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof" json:"histogram,omitempty"` -} -type Metric_ExponentialHistogram struct { - ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof" json:"exponential_histogram,omitempty"` -} -type Metric_Summary struct { - Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof" json:"summary,omitempty"` -} - -func (*Metric_Gauge) isMetric_Data() {} -func (*Metric_Sum) isMetric_Data() {} -func (*Metric_Histogram) isMetric_Data() {} -func (*Metric_ExponentialHistogram) isMetric_Data() {} -func (*Metric_Summary) isMetric_Data() {} - -func (m *Metric) GetData() isMetric_Data { - if m != nil { - return m.Data - } - return nil -} - -func (m *Metric) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Metric) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Metric) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *Metric) GetGauge() *Gauge { - if x, ok := m.GetData().(*Metric_Gauge); ok { - return x.Gauge - } - return nil -} - -func (m *Metric) GetSum() *Sum { - if x, ok := m.GetData().(*Metric_Sum); ok { - return x.Sum - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if x, ok := m.GetData().(*Metric_Histogram); ok { - return x.Histogram - } - return nil -} - -func (m *Metric) GetExponentialHistogram() *ExponentialHistogram { - if x, ok := m.GetData().(*Metric_ExponentialHistogram); ok { - return x.ExponentialHistogram - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if x, ok := m.GetData().(*Metric_Summary); ok { - return x.Summary - } - return nil -} - -func (m *Metric) GetMetadata() []v11.KeyValue { - if m != nil { - return m.Metadata - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
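Data above is a gogo-style oneof: a single interface-typed field plus one wrapper type per variant. A self-contained imitation shows the usual consumption pattern, a type switch; gauge and sum here are stand-ins, not the generated Metric_Gauge and Metric_Sum wrappers:

```go
package main

import "fmt"

// isData plays the role of the unexported isMetric_Data interface; each
// variant satisfies it with an empty marker method.
type isData interface{ isData() }

type gauge struct{}
type sum struct{ monotonic bool }

func (gauge) isData() {}
func (sum) isData()   {}

// describe dispatches on the concrete oneof variant, mirroring how
// callers of GetData() distinguish Gauge, Sum, Histogram, and so on.
func describe(d isData) string {
	switch v := d.(type) {
	case gauge:
		return "gauge: current value, no temporality"
	case sum:
		return fmt.Sprintf("sum: monotonic=%v", v.monotonic)
	default:
		return "unknown data type"
	}
}

func main() {
	fmt.Println(describe(gauge{}))
	fmt.Println(describe(sum{monotonic: true}))
}
```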
-func (*Metric) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Metric_Gauge)(nil), - (*Metric_Sum)(nil), - (*Metric_Histogram)(nil), - (*Metric_ExponentialHistogram)(nil), - (*Metric_Summary)(nil), - } -} - -// Gauge represents the type of a scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. -type Gauge struct { - DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{4} -} -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) -} -func (m *Gauge) XXX_Size() int { - return m.Size() -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *Gauge) GetDataPoints() []*NumberDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -// Sum represents the type of a scalar metric that is calculated as a sum of all -// reported measurements over a time interval. -type Sum struct { - DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - // If "true", the sum is monotonic. 
- IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` -} - -func (m *Sum) Reset() { *m = Sum{} } -func (m *Sum) String() string { return proto.CompactTextString(m) } -func (*Sum) ProtoMessage() {} -func (*Sum) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{5} -} -func (m *Sum) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Sum.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Sum) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sum.Merge(m, src) -} -func (m *Sum) XXX_Size() int { - return m.Size() -} -func (m *Sum) XXX_DiscardUnknown() { - xxx_messageInfo_Sum.DiscardUnknown(m) -} - -var xxx_messageInfo_Sum proto.InternalMessageInfo - -func (m *Sum) GetDataPoints() []*NumberDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *Sum) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -func (m *Sum) GetIsMonotonic() bool { - if m != nil { - return m.IsMonotonic - } - return false -} - -// Histogram represents the type of a metric that is calculated by aggregating -// as a Histogram of all reported measurements over a time interval. -type Histogram struct { - DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. 
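In practice these generated structs are populated through the module's public wrapper API rather than directly. A minimal sketch of building a cumulative, monotonic Sum like the one just defined, assuming the public pmetric and pcommon packages of this module (go.opentelemetry.io/collector/pdata):

    package main

    import (
        "time"

        "go.opentelemetry.io/collector/pdata/pcommon"
        "go.opentelemetry.io/collector/pdata/pmetric"
    )

    func main() {
        md := pmetric.NewMetrics()
        sm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()

        // A counter-style metric: cumulative temporality, monotonic sum.
        m := sm.Metrics().AppendEmpty()
        m.SetName("requests.total")
        m.SetUnit("{request}")
        sum := m.SetEmptySum()
        sum.SetIsMonotonic(true)
        sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)

        // One int-valued point; start and end timestamps bracket the aggregation.
        dp := sum.DataPoints().AppendEmpty()
        dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-time.Minute)))
        dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
        dp.SetIntValue(42)
        dp.Attributes().PutStr("http.method", "GET")
    }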
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{6} -} -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return m.Size() -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Histogram) GetDataPoints() []*HistogramDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *Histogram) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -// ExponentialHistogram represents the type of a metric that is calculated by aggregating -// as an ExponentialHistogram of all reported double measurements over a time interval. -type ExponentialHistogram struct { - DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. 
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` -} - -func (m *ExponentialHistogram) Reset() { *m = ExponentialHistogram{} } -func (m *ExponentialHistogram) String() string { return proto.CompactTextString(m) } -func (*ExponentialHistogram) ProtoMessage() {} -func (*ExponentialHistogram) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{7} -} -func (m *ExponentialHistogram) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExponentialHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExponentialHistogram.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExponentialHistogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExponentialHistogram.Merge(m, src) -} -func (m *ExponentialHistogram) XXX_Size() int { - return m.Size() -} -func (m *ExponentialHistogram) XXX_DiscardUnknown() { - xxx_messageInfo_ExponentialHistogram.DiscardUnknown(m) -} - -var xxx_messageInfo_ExponentialHistogram proto.InternalMessageInfo - -func (m *ExponentialHistogram) GetDataPoints() []*ExponentialHistogramDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *ExponentialHistogram) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -// Summary metric data are used to convey quantile summaries, -// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) -// and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) -// data type. These data points cannot always be merged in a meaningful way. -// While they can be useful in some applications, histogram data points are -// recommended for new applications. -// Summary metrics do not have an aggregation temporality field. This is -// because the count and sum fields of a SummaryDataPoint are assumed to be -// cumulative values. 
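The temporality fields above are what make StartTimeUnixNano actionable: for cumulative monotonic sums, two points of the same series yield a rate. A hypothetical consumer-side sketch (assumes double-valued points and an unbroken sequence, i.e. matching start times):

    // ratePerSecond computes the per-second rate between two cumulative
    // points of one monotonic Sum series, where b is the later point.
    // The second return value is false on reset or out-of-order input.
    func ratePerSecond(a, b *NumberDataPoint) (float64, bool) {
        if b.GetTimeUnixNano() <= a.GetTimeUnixNano() ||
            a.GetStartTimeUnixNano() != b.GetStartTimeUnixNano() {
            return 0, false // sequence broken or points out of order
        }
        elapsedSeconds := float64(b.GetTimeUnixNano()-a.GetTimeUnixNano()) / 1e9
        return (b.GetAsDouble() - a.GetAsDouble()) / elapsedSeconds, true
    }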
-type Summary struct { - DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{8} -} -func (m *Summary) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) -} -func (m *Summary) XXX_Size() int { - return m.Size() -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo - -func (m *Summary) GetDataPoints() []*SummaryDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -// NumberDataPoint is a single data point in a timeseries that describes the -// time-varying scalar value of a metric. -type NumberDataPoint struct { - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. - Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"` - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // The value itself. A point is considered invalid when one of the recognized - // value fields is not present inside this oneof. 
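An aside on the validity rules quoted so far: a consumer honoring both the zero-TimeUnixNano rule (see the Metric comments above) and the populated-oneof rule might screen points as below. A hypothetical sketch; the oneof wrapper types it matches on are defined just below:

    // validNumberPoint reports whether a NumberDataPoint passes the two
    // minimal validity checks described in the surrounding comments.
    func validNumberPoint(p *NumberDataPoint) bool {
        if p.GetTimeUnixNano() == 0 {
            return false // zero end time SHOULD be rejected by consumers
        }
        switch p.GetValue().(type) {
        case *NumberDataPoint_AsDouble, *NumberDataPoint_AsInt:
            return true
        default:
            return false // no recognized value field is present
        }
    }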
- // - // Types that are valid to be assigned to Value: - // *NumberDataPoint_AsDouble - // *NumberDataPoint_AsInt - Value isNumberDataPoint_Value `protobuf_oneof:"value"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars"` - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"` -} - -func (m *NumberDataPoint) Reset() { *m = NumberDataPoint{} } -func (m *NumberDataPoint) String() string { return proto.CompactTextString(m) } -func (*NumberDataPoint) ProtoMessage() {} -func (*NumberDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{9} -} -func (m *NumberDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NumberDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NumberDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NumberDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_NumberDataPoint.Merge(m, src) -} -func (m *NumberDataPoint) XXX_Size() int { - return m.Size() -} -func (m *NumberDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_NumberDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_NumberDataPoint proto.InternalMessageInfo - -type isNumberDataPoint_Value interface { - isNumberDataPoint_Value() - MarshalTo([]byte) (int, error) - Size() int -} - -type NumberDataPoint_AsDouble struct { - AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"` -} -type NumberDataPoint_AsInt struct { - AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"` -} - -func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {} -func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {} - -func (m *NumberDataPoint) GetValue() isNumberDataPoint_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *NumberDataPoint) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *NumberDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *NumberDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *NumberDataPoint) GetAsDouble() float64 { - if x, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok { - return x.AsDouble - } - return 0 -} - -func (m *NumberDataPoint) GetAsInt() int64 { - if x, ok := m.GetValue().(*NumberDataPoint_AsInt); ok { - return x.AsInt - } - return 0 -} - -func (m *NumberDataPoint) GetExemplars() []Exemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -func (m *NumberDataPoint) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*NumberDataPoint) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*NumberDataPoint_AsDouble)(nil), - (*NumberDataPoint_AsInt)(nil), - } -} - -// HistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Histogram. 
A Histogram contains summary statistics -// for a population of values; it may optionally contain the distribution of -// those values across a set of buckets. -// -// If the histogram contains the distribution of values, then both -// "explicit_bounds" and "bucket_counts" fields must be defined. -// If the histogram does not contain the distribution of values, then both -// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and -// "sum" are known. -type HistogramDataPoint struct { - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. - Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"` - // StartTimeUnixNano is optional but strongly encouraged, see the - // detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be non-negative. This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram - // - // Types that are valid to be assigned to Sum_: - // *HistogramDataPoint_Sum - Sum_ isHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"` - // bucket_counts is an optional field that contains the count values of the histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in the bucket_counts array must be one greater than - // the number of elements in the explicit_bounds array. The exception to this rule - // is when the length of bucket_counts is 0; then the length of explicit_bounds - // must also be 0. 
- BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // - // The boundaries for bucket at index i are: - // - // (-infinity, explicit_bounds[i]] for i == 0 - // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) - // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) - // - // The values in the explicit_bounds array must be strictly increasing. - // - // Histogram buckets are inclusive of their upper boundary, except the last - // bucket where the boundary is at infinity. This format is intentionally - // compatible with the OpenMetrics histogram definition. - // - // If bucket_counts length is 0 then explicit_bounds length must also be 0, - // otherwise the data point is invalid. - ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars"` - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"` - // min is the minimum value over (start_time, end_time]. - // - // Types that are valid to be assigned to Min_: - // *HistogramDataPoint_Min - Min_ isHistogramDataPoint_Min_ `protobuf_oneof:"min_"` - // max is the maximum value over (start_time, end_time]. - // - // Types that are valid to be assigned to Max_: - // *HistogramDataPoint_Max - Max_ isHistogramDataPoint_Max_ `protobuf_oneof:"max_"` -} - -func (m *HistogramDataPoint) Reset() { *m = HistogramDataPoint{} } -func (m *HistogramDataPoint) String() string { return proto.CompactTextString(m) } -func (*HistogramDataPoint) ProtoMessage() {} -func (*HistogramDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{10} -} -func (m *HistogramDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HistogramDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HistogramDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_HistogramDataPoint.Merge(m, src) -} -func (m *HistogramDataPoint) XXX_Size() int { - return m.Size() -} -func (m *HistogramDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_HistogramDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_HistogramDataPoint proto.InternalMessageInfo - -type isHistogramDataPoint_Sum_ interface { - isHistogramDataPoint_Sum_() - MarshalTo([]byte) (int, error) - Size() int -} -type isHistogramDataPoint_Min_ interface { - isHistogramDataPoint_Min_() - MarshalTo([]byte) (int, error) - Size() int -} -type isHistogramDataPoint_Max_ interface { - isHistogramDataPoint_Max_() - MarshalTo([]byte) (int, error) - Size() int -} - -type HistogramDataPoint_Sum struct { - Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"` -} -type HistogramDataPoint_Min struct { - Min float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"` -} -type 
HistogramDataPoint_Max struct { - Max float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"` -} - -func (*HistogramDataPoint_Sum) isHistogramDataPoint_Sum_() {} -func (*HistogramDataPoint_Min) isHistogramDataPoint_Min_() {} -func (*HistogramDataPoint_Max) isHistogramDataPoint_Max_() {} - -func (m *HistogramDataPoint) GetSum_() isHistogramDataPoint_Sum_ { - if m != nil { - return m.Sum_ - } - return nil -} -func (m *HistogramDataPoint) GetMin_() isHistogramDataPoint_Min_ { - if m != nil { - return m.Min_ - } - return nil -} -func (m *HistogramDataPoint) GetMax_() isHistogramDataPoint_Max_ { - if m != nil { - return m.Max_ - } - return nil -} - -func (m *HistogramDataPoint) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *HistogramDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *HistogramDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *HistogramDataPoint) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *HistogramDataPoint) GetSum() float64 { - if x, ok := m.GetSum_().(*HistogramDataPoint_Sum); ok { - return x.Sum - } - return 0 -} - -func (m *HistogramDataPoint) GetBucketCounts() []uint64 { - if m != nil { - return m.BucketCounts - } - return nil -} - -func (m *HistogramDataPoint) GetExplicitBounds() []float64 { - if m != nil { - return m.ExplicitBounds - } - return nil -} - -func (m *HistogramDataPoint) GetExemplars() []Exemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -func (m *HistogramDataPoint) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -func (m *HistogramDataPoint) GetMin() float64 { - if x, ok := m.GetMin_().(*HistogramDataPoint_Min); ok { - return x.Min - } - return 0 -} - -func (m *HistogramDataPoint) GetMax() float64 { - if x, ok := m.GetMax_().(*HistogramDataPoint_Max); ok { - return x.Max - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*HistogramDataPoint) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*HistogramDataPoint_Sum)(nil), - (*HistogramDataPoint_Min)(nil), - (*HistogramDataPoint_Max)(nil), - } -} - -// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains -// summary statistics for a population of values, it may optionally contain the -// distribution of those values across a set of buckets. -type ExponentialHistogramDataPoint struct { - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. 
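Returning to the explicit-bounds rule above: the boundaries define len(bucket_counts) == len(explicit_bounds) + 1 upper-inclusive buckets, so locating the bucket for a value is a binary search over the bounds. A sketch, as a hypothetical helper:

    import "sort"

    // bucketIndex returns the index into bucket_counts for value v,
    // given strictly increasing bounds and upper-inclusive buckets:
    // (-inf, b[0]], (b[0], b[1]], ..., (b[n-1], +inf).
    func bucketIndex(bounds []float64, v float64) int {
        // SearchFloat64s returns the first i with bounds[i] >= v; since
        // buckets include their upper boundary, v == bounds[i] maps to i,
        // and v above every bound maps to the overflow bucket len(bounds).
        return sort.SearchFloat64s(bounds, v)
    }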
- // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. - Attributes []v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"` - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be - // non-negative. This value must be equal to the sum of the "bucket_counts" - // values in the positive and negative Buckets plus the "zero_count" field. - Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram - // - // Types that are valid to be assigned to Sum_: - // *ExponentialHistogramDataPoint_Sum - Sum_ isExponentialHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"` - // scale describes the resolution of the histogram. Boundaries are - // located at powers of the base, where: - // - // base = (2^(2^-scale)) - // - // The histogram bucket identified by `index`, a signed integer, - // contains values that are greater than (base^index) and - // less than or equal to (base^(index+1)). - // - // The positive and negative ranges of the histogram are expressed - // separately. Negative values are mapped by their absolute value - // into the negative range using the same scale as the positive range. - // - // scale is not restricted by the protocol, as the permissible - // values depend on the range of the data. - Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"` - // zero_count is the count of values that are either exactly zero or - // within the region considered zero by the instrumentation at the - // tolerated degree of precision. This bucket stores values that - // cannot be expressed using the standard exponential formula as - // well as values that have been rounded to zero. - // - // Implementations MAY consider the zero bucket to have probability - // mass equal to (zero_count / count). - ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"` - // positive carries the positive range of exponential bucket counts. - Positive ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive"` - // negative carries the negative range of exponential bucket counts. 
- Negative ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative"` - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars"` - // min is the minimum value over (start_time, end_time]. - // - // Types that are valid to be assigned to Min_: - // *ExponentialHistogramDataPoint_Min - Min_ isExponentialHistogramDataPoint_Min_ `protobuf_oneof:"min_"` - // max is the maximum value over (start_time, end_time]. - // - // Types that are valid to be assigned to Max_: - // *ExponentialHistogramDataPoint_Max - Max_ isExponentialHistogramDataPoint_Max_ `protobuf_oneof:"max_"` - // ZeroThreshold may be optionally set to convey the width of the zero - // region. Where the zero region is defined as the closed interval - // [-ZeroThreshold, ZeroThreshold]. - // When ZeroThreshold is 0, zero count bucket stores values that cannot be - // expressed using the standard exponential formula as well as values that - // have been rounded to zero. - ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` -} - -func (m *ExponentialHistogramDataPoint) Reset() { *m = ExponentialHistogramDataPoint{} } -func (m *ExponentialHistogramDataPoint) String() string { return proto.CompactTextString(m) } -func (*ExponentialHistogramDataPoint) ProtoMessage() {} -func (*ExponentialHistogramDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{11} -} -func (m *ExponentialHistogramDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExponentialHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExponentialHistogramDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExponentialHistogramDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExponentialHistogramDataPoint.Merge(m, src) -} -func (m *ExponentialHistogramDataPoint) XXX_Size() int { - return m.Size() -} -func (m *ExponentialHistogramDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_ExponentialHistogramDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_ExponentialHistogramDataPoint proto.InternalMessageInfo - -type isExponentialHistogramDataPoint_Sum_ interface { - isExponentialHistogramDataPoint_Sum_() - MarshalTo([]byte) (int, error) - Size() int -} -type isExponentialHistogramDataPoint_Min_ interface { - isExponentialHistogramDataPoint_Min_() - MarshalTo([]byte) (int, error) - Size() int -} -type isExponentialHistogramDataPoint_Max_ interface { - isExponentialHistogramDataPoint_Max_() - MarshalTo([]byte) (int, error) - Size() int -} - -type ExponentialHistogramDataPoint_Sum struct { - Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"` -} -type ExponentialHistogramDataPoint_Min struct { - Min float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"` -} -type ExponentialHistogramDataPoint_Max struct { - Max float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"` -} - -func 
(*ExponentialHistogramDataPoint_Sum) isExponentialHistogramDataPoint_Sum_() {} -func (*ExponentialHistogramDataPoint_Min) isExponentialHistogramDataPoint_Min_() {} -func (*ExponentialHistogramDataPoint_Max) isExponentialHistogramDataPoint_Max_() {} - -func (m *ExponentialHistogramDataPoint) GetSum_() isExponentialHistogramDataPoint_Sum_ { - if m != nil { - return m.Sum_ - } - return nil -} -func (m *ExponentialHistogramDataPoint) GetMin_() isExponentialHistogramDataPoint_Min_ { - if m != nil { - return m.Min_ - } - return nil -} -func (m *ExponentialHistogramDataPoint) GetMax_() isExponentialHistogramDataPoint_Max_ { - if m != nil { - return m.Max_ - } - return nil -} - -func (m *ExponentialHistogramDataPoint) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *ExponentialHistogramDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetSum() float64 { - if x, ok := m.GetSum_().(*ExponentialHistogramDataPoint_Sum); ok { - return x.Sum - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetScale() int32 { - if m != nil { - return m.Scale - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetZeroCount() uint64 { - if m != nil { - return m.ZeroCount - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetPositive() ExponentialHistogramDataPoint_Buckets { - if m != nil { - return m.Positive - } - return ExponentialHistogramDataPoint_Buckets{} -} - -func (m *ExponentialHistogramDataPoint) GetNegative() ExponentialHistogramDataPoint_Buckets { - if m != nil { - return m.Negative - } - return ExponentialHistogramDataPoint_Buckets{} -} - -func (m *ExponentialHistogramDataPoint) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetExemplars() []Exemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -func (m *ExponentialHistogramDataPoint) GetMin() float64 { - if x, ok := m.GetMin_().(*ExponentialHistogramDataPoint_Min); ok { - return x.Min - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetMax() float64 { - if x, ok := m.GetMax_().(*ExponentialHistogramDataPoint_Max); ok { - return x.Max - } - return 0 -} - -func (m *ExponentialHistogramDataPoint) GetZeroThreshold() float64 { - if m != nil { - return m.ZeroThreshold - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ExponentialHistogramDataPoint) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ExponentialHistogramDataPoint_Sum)(nil), - (*ExponentialHistogramDataPoint_Min)(nil), - (*ExponentialHistogramDataPoint_Max)(nil), - } -} - -// Buckets are a set of bucket counts, encoded in a contiguous array -// of counts. -type ExponentialHistogramDataPoint_Buckets struct { - // Offset is the bucket index of the first entry in the bucket_counts array. - // - // Note: This uses a varint encoding as a simple form of compression. - Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` - // bucket_counts is an array of count values, where bucket_counts[i] carries - // the count of the bucket at index (offset+i). 
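The scale and offset comments above fully determine where a measurement lands: base = 2^(2^-scale), bucket index i covers (base^i, base^(i+1)], and bucket_counts[i] holds the count for index offset+i. A hypothetical sketch (positive values only; exact powers of base may land one bucket off through float rounding):

    import "math"

    // expoSlot maps a value v > 0 to its position in a Buckets
    // bucket_counts array at the given scale and offset. A negative
    // result means v falls below the recorded range.
    func expoSlot(v float64, scale, offset int32) int {
        base := math.Exp2(math.Exp2(-float64(scale)))             // base = 2^(2^-scale)
        index := int32(math.Ceil(math.Log(v)/math.Log(base))) - 1 // v in (base^index, base^(index+1)]
        return int(index - offset)
    }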
bucket_counts[i] is the count - // of values greater than base^(offset+i) and less than or equal to - // base^(offset+i+1). - // - // Note: By contrast, the explicit HistogramDataPoint uses - // fixed64. This field is expected to have many buckets, - // especially zeros, so uint64 has been selected to ensure - // varint encoding. - BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` -} - -func (m *ExponentialHistogramDataPoint_Buckets) Reset() { *m = ExponentialHistogramDataPoint_Buckets{} } -func (m *ExponentialHistogramDataPoint_Buckets) String() string { return proto.CompactTextString(m) } -func (*ExponentialHistogramDataPoint_Buckets) ProtoMessage() {} -func (*ExponentialHistogramDataPoint_Buckets) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{11, 0} -} -func (m *ExponentialHistogramDataPoint_Buckets) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExponentialHistogramDataPoint_Buckets) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExponentialHistogramDataPoint_Buckets) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Merge(m, src) -} -func (m *ExponentialHistogramDataPoint_Buckets) XXX_Size() int { - return m.Size() -} -func (m *ExponentialHistogramDataPoint_Buckets) XXX_DiscardUnknown() { - xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.DiscardUnknown(m) -} - -var xxx_messageInfo_ExponentialHistogramDataPoint_Buckets proto.InternalMessageInfo - -func (m *ExponentialHistogramDataPoint_Buckets) GetOffset() int32 { - if m != nil { - return m.Offset - } - return 0 -} - -func (m *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 { - if m != nil { - return m.BucketCounts - } - return nil -} - -// SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. The count and sum fields represent -// cumulative values. -type SummaryDataPoint struct { - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. - Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"` - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. 
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be non-negative. - Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary - Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` - // (Optional) list of values at different quantiles of the distribution calculated - // from the current snapshot. The quantiles must be strictly increasing. - QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"` - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"` -} - -func (m *SummaryDataPoint) Reset() { *m = SummaryDataPoint{} } -func (m *SummaryDataPoint) String() string { return proto.CompactTextString(m) } -func (*SummaryDataPoint) ProtoMessage() {} -func (*SummaryDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{12} -} -func (m *SummaryDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SummaryDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SummaryDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SummaryDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryDataPoint.Merge(m, src) -} -func (m *SummaryDataPoint) XXX_Size() int { - return m.Size() -} -func (m *SummaryDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryDataPoint proto.InternalMessageInfo - -func (m *SummaryDataPoint) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *SummaryDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *SummaryDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *SummaryDataPoint) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *SummaryDataPoint) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *SummaryDataPoint) GetQuantileValues() []*SummaryDataPoint_ValueAtQuantile { - if m != nil { - return m.QuantileValues - } - return nil -} - -func (m *SummaryDataPoint) GetFlags() uint32 { - if m != nil { - return m.Flags 
- } - return 0 -} - -// Represents the value at a given quantile of a distribution. -// -// To record Min and Max values following conventions are used: -// - The 1.0 quantile is equivalent to the maximum value observed. -// - The 0.0 quantile is equivalent to the minimum value observed. -// -// See the following issue for more context: -// https://github.com/open-telemetry/opentelemetry-proto/issues/125 -type SummaryDataPoint_ValueAtQuantile struct { - // The quantile of a distribution. Must be in the interval - // [0.0, 1.0]. - Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` - // The value at the given quantile of a distribution. - // - // Quantile values must NOT be negative. - Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *SummaryDataPoint_ValueAtQuantile) Reset() { *m = SummaryDataPoint_ValueAtQuantile{} } -func (m *SummaryDataPoint_ValueAtQuantile) String() string { return proto.CompactTextString(m) } -func (*SummaryDataPoint_ValueAtQuantile) ProtoMessage() {} -func (*SummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{12, 0} -} -func (m *SummaryDataPoint_ValueAtQuantile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SummaryDataPoint_ValueAtQuantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SummaryDataPoint_ValueAtQuantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Merge(m, src) -} -func (m *SummaryDataPoint_ValueAtQuantile) XXX_Size() int { - return m.Size() -} -func (m *SummaryDataPoint_ValueAtQuantile) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryDataPoint_ValueAtQuantile proto.InternalMessageInfo - -func (m *SummaryDataPoint_ValueAtQuantile) GetQuantile() float64 { - if m != nil { - return m.Quantile - } - return 0 -} - -func (m *SummaryDataPoint_ValueAtQuantile) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -// A representation of an exemplar, which is a sample input measurement. -// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -type Exemplar struct { - // The set of key/value pairs that were filtered out by the aggregator, but - // recorded alongside the original measurement. Only key/value pairs that were - // filtered out by the aggregator should be included - FilteredAttributes []v11.KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes"` - // time_unix_nano is the exact time when this exemplar was recorded - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // The value of the measurement that was recorded. An exemplar is - // considered invalid when one of the recognized value fields is not present - // inside this oneof. 
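Given the quantile conventions just described (0.0 is the observed minimum, 1.0 the maximum, quantiles strictly increasing), pulling one quantile out of a summary point is a plain scan. A hypothetical sketch:

    // valueAtQuantile returns the recorded value for quantile q (for
    // example 0.99) and false when that quantile is not in the snapshot.
    func valueAtQuantile(p *SummaryDataPoint, q float64) (float64, bool) {
        for _, vq := range p.GetQuantileValues() {
            if vq.GetQuantile() == q {
                return vq.GetValue(), true
            }
        }
        return 0, false
    }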
- // - // Types that are valid to be assigned to Value: - // *Exemplar_AsDouble - // *Exemplar_AsInt - Value isExemplar_Value `protobuf_oneof:"value"` - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` - // (Optional) Trace ID of the exemplar trace. - // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` -} - -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{13} -} -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return m.Size() -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -type isExemplar_Value interface { - isExemplar_Value() - MarshalTo([]byte) (int, error) - Size() int -} - -type Exemplar_AsDouble struct { - AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"` -} -type Exemplar_AsInt struct { - AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"` -} - -func (*Exemplar_AsDouble) isExemplar_Value() {} -func (*Exemplar_AsInt) isExemplar_Value() {} - -func (m *Exemplar) GetValue() isExemplar_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *Exemplar) GetFilteredAttributes() []v11.KeyValue { - if m != nil { - return m.FilteredAttributes - } - return nil -} - -func (m *Exemplar) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *Exemplar) GetAsDouble() float64 { - if x, ok := m.GetValue().(*Exemplar_AsDouble); ok { - return x.AsDouble - } - return 0 -} - -func (m *Exemplar) GetAsInt() int64 { - if x, ok := m.GetValue().(*Exemplar_AsInt); ok { - return x.AsInt - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
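Through the module's public wrapper API, an exemplar and its trace context attach to a data point in a few calls. A minimal sketch, assuming the public pmetric and pcommon packages of this module:

    import (
        "time"

        "go.opentelemetry.io/collector/pdata/pcommon"
        "go.opentelemetry.io/collector/pdata/pmetric"
    )

    // addExemplar records one sampled measurement, with its trace
    // context, against an existing point. Illustration only.
    func addExemplar(dp pmetric.NumberDataPoint, traceID pcommon.TraceID, spanID pcommon.SpanID, v float64) {
        ex := dp.Exemplars().AppendEmpty()
        ex.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
        ex.SetDoubleValue(v)
        ex.SetTraceID(traceID)
        ex.SetSpanID(spanID)
        // Only pairs dropped by the aggregation belong here.
        ex.FilteredAttributes().PutStr("client.id", "example")
    }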
-func (*Exemplar) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Exemplar_AsDouble)(nil), - (*Exemplar_AsInt)(nil), - } -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) - proto.RegisterEnum("opentelemetry.proto.metrics.v1.DataPointFlags", DataPointFlags_name, DataPointFlags_value) - proto.RegisterType((*MetricsData)(nil), "opentelemetry.proto.metrics.v1.MetricsData") - proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics") - proto.RegisterType((*ScopeMetrics)(nil), "opentelemetry.proto.metrics.v1.ScopeMetrics") - proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric") - proto.RegisterType((*Gauge)(nil), "opentelemetry.proto.metrics.v1.Gauge") - proto.RegisterType((*Sum)(nil), "opentelemetry.proto.metrics.v1.Sum") - proto.RegisterType((*Histogram)(nil), "opentelemetry.proto.metrics.v1.Histogram") - proto.RegisterType((*ExponentialHistogram)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogram") - proto.RegisterType((*Summary)(nil), "opentelemetry.proto.metrics.v1.Summary") - proto.RegisterType((*NumberDataPoint)(nil), "opentelemetry.proto.metrics.v1.NumberDataPoint") - proto.RegisterType((*HistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.HistogramDataPoint") - proto.RegisterType((*ExponentialHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint") - proto.RegisterType((*ExponentialHistogramDataPoint_Buckets)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets") - proto.RegisterType((*SummaryDataPoint)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint") - proto.RegisterType((*SummaryDataPoint_ValueAtQuantile)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile") - proto.RegisterType((*Exemplar)(nil), "opentelemetry.proto.metrics.v1.Exemplar") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917) -} - -var fileDescriptor_3c3112f9fa006917 = []byte{ - // 1568 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x4f, 0x1b, 0x49, - 0x16, 0x77, 0xfb, 0xdb, 0xcf, 0x06, 0x9c, 0x5a, 0x96, 0xb4, 0x58, 0xe1, 0x38, 0xce, 0x26, 0xb0, - 0xd9, 0xc8, 0x5e, 0xc8, 0x6a, 0x3f, 0x0e, 0x91, 0x62, 0x63, 0x03, 0x26, 0x80, 0x49, 0xd9, 0x20, - 0x25, 0x8a, 0xd2, 0x2a, 0xec, 0xc2, 0xb4, 0xd2, 0xdd, 0xe5, 0xed, 0xae, 0x46, 0xb0, 0xff, 0xc1, - 0x4a, 0x7b, 0xc8, 0xdf, 0xb1, 0xca, 0x6d, 0x4f, 0x73, 0x9b, 0x63, 0x8e, 0x99, 0xdb, 0x68, 0x34, - 0x8a, 0x46, 0xe4, 0x30, 0x23, 0xcd, 0x3f, 0x31, 0xaa, 0xea, 0x6e, 0xfc, 0x81, 0x89, 0xc9, 0xc7, - 0x21, 0x39, 0xb9, 0xea, 0xd5, 0x7b, 0xbf, 0x7a, 0xaf, 0xde, 0xef, 0xd5, 0x2b, 0x37, 0xdc, 0x63, - 0x3d, 0x6a, 0x71, 0x6a, 0x50, 0x93, 0x72, 0xfb, 0xb4, 0xd4, 0xb3, 0x19, 0x67, 0x25, 0x31, 0xd6, - 0xdb, 0x4e, 0xe9, 0x78, 0x39, 0x18, 0x16, 0xe5, 0x02, 0xca, 0x0d, 0x69, 0x7b, 0xc2, 0x62, 0xa0, - 0x72, 0xbc, 0x3c, 0x3f, 0xdb, 0x65, 0x5d, 0xe6, 0x61, 0x88, 0x91, 0xa7, 0x30, 0x7f, 0x77, 0xdc, - 0x1e, 0x6d, 0x66, 0x9a, 0xcc, 0x12, 0x5b, 0x78, 0x23, 0x5f, 0xb7, 0x38, 0x4e, 0xd7, 0xa6, 0x0e, - 0x73, 0xed, 0x36, 0x15, 0xda, 0xc1, 0xd8, 0xd3, 0x2f, 0xe8, 0x90, 0xde, 0xf6, 0xf6, 0xaf, 0x12, - 0x4e, 0xd0, 0x53, 0xc8, 0x06, 0x0a, 0x9a, 0xef, 0x97, 0xaa, 0xe4, 0x23, 0x4b, 0xe9, 0x95, 0x52, - 0xf1, 0xfd, 0xbe, 0x17, 0xb1, 0x6f, 0xe7, 0xc3, 0xe1, 0x19, 0x7b, 0x58, 0x50, 0xf8, 0x26, 0x0c, - 0x33, 
... (middle of the gzipped FileDescriptorProto byte array omitted: opaque, machine-generated binary constants) ...
0x74, 0x13, 0x16, 0x2e, 0x53, 0xac, 0xd6, - 0xb6, 0x5a, 0xe5, 0xac, 0x82, 0xee, 0x40, 0xe1, 0x32, 0x95, 0xd5, 0xbd, 0xed, 0xbd, 0xad, 0x72, - 0xab, 0xbe, 0x5f, 0xcb, 0x86, 0xef, 0x3e, 0x87, 0xe9, 0x73, 0xbe, 0xae, 0xc9, 0xfb, 0xed, 0x06, - 0xfc, 0xa1, 0x5a, 0x6e, 0x95, 0xb5, 0xdd, 0x46, 0x7d, 0xa7, 0xa5, 0xad, 0x6d, 0x95, 0xd7, 0x9b, - 0x5a, 0xb5, 0xa1, 0xed, 0x34, 0x5a, 0xda, 0x5e, 0xb3, 0x96, 0x0d, 0xa1, 0x3f, 0xc3, 0xe2, 0x05, - 0x85, 0x9d, 0x86, 0x86, 0x6b, 0xab, 0x0d, 0x5c, 0xad, 0x55, 0xb5, 0xfd, 0xf2, 0xd6, 0x5e, 0x4d, - 0xdb, 0x2e, 0x37, 0x1f, 0x65, 0x95, 0xca, 0xff, 0x95, 0xd7, 0x67, 0x39, 0xe5, 0xcd, 0x59, 0x4e, - 0xf9, 0xe9, 0x2c, 0xa7, 0xbc, 0x7c, 0x97, 0x0b, 0xbd, 0x79, 0x97, 0x0b, 0x7d, 0xff, 0x2e, 0x17, - 0x82, 0x9b, 0x3a, 0x9b, 0x50, 0x51, 0x95, 0x8c, 0xff, 0x35, 0x64, 0x57, 0x2c, 0xec, 0x2a, 0x4f, - 0x6b, 0x1f, 0x9c, 0x0f, 0xef, 0x03, 0x59, 0x97, 0x5a, 0x03, 0xdf, 0xec, 0xfe, 0x17, 0xce, 0x35, - 0x7a, 0xd4, 0x6a, 0x9d, 0x83, 0x48, 0x78, 0xff, 0x73, 0x87, 0x53, 0xdc, 0x5f, 0x3e, 0x88, 0x4b, - 0xab, 0xfb, 0xbf, 0x05, 0x00, 0x00, 0xff, 0xff, 0x00, 0xa3, 0x78, 0x2c, 0xfd, 0x13, 0x00, 0x00, -} - -func (m *MetricsData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceMetrics) > 0 { - for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResourceMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DeprecatedScopeMetrics) > 0 { - for iNdEx := len(m.DeprecatedScopeMetrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DeprecatedScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3e - i-- - dAtA[i] = 0xc2 - } - } - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.ScopeMetrics) > 0 { - for iNdEx := len(m.ScopeMetrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ScopeMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
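
Reviewer note: the generated code above sizes each message first, then fills the buffer back to front, so a length-delimited field is emitted as payload, then length varint, then its one-byte key (0x0a = field 1, wire type 2). A standalone sketch of that pattern; encodeVarint and putLenField are illustrative names, not part of this diff:

package main

import "fmt"

// encodeVarint writes v so that it ends just before offset and returns the
// new (lower) offset, mirroring how encodeVarintMetrics fills dAtA right-to-left.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	n := 1
	for x := v; x >= 1<<7; x >>= 7 {
		n++
	}
	offset -= n
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// putLenField prepends one length-delimited field: payload bytes first,
// then the length varint, then the key fieldNum<<3|2.
func putLenField(dAtA []byte, i int, fieldNum int, payload []byte) int {
	i -= len(payload)
	copy(dAtA[i:], payload)
	i = encodeVarint(dAtA, i, uint64(len(payload)))
	i--
	dAtA[i] = byte(fieldNum<<3 | 2)
	return i
}

func main() {
	buf := make([]byte, 16)
	i := len(buf)
	i = putLenField(buf, i, 1, []byte("abc"))
	fmt.Printf("%% x\n", buf[i:]) // 0a 03 61 62 63
}

Writing backwards is the point of the sized-buffer strategy: by the time a submessage's key is written, its encoded length is already known, so no second sizing pass is needed.
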
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScopeMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScopeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.Metrics) > 0 { - for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Metric) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metric) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metadata) > 0 { - for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - } - if m.Data != nil { - { - size := m.Data.Size() - i -= size - if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if len(m.Unit) > 0 { - i -= len(m.Unit) - copy(dAtA[i:], m.Unit) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) - i-- - dAtA[i] = 0x1a - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Metric_Gauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Gauge != nil { - { - size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *Metric_Sum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Sum != nil { - { - size, err := m.Sum.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} -func (m *Metric_Histogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_Histogram) 
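
Reviewer note: the oneof marshalers above hard-code their keys; each is fieldNumber<<3|wireType for Metric's data oneof (gauge=5, sum=7, histogram=9, exponential_histogram=10, summary=11, all length-delimited). A quick standalone check of those constants:

package main

import "fmt"

func tag(fieldNum, wireType int) byte { return byte(fieldNum<<3 | wireType) }

func main() {
	fmt.Printf("gauge(5):                  %#x\n", tag(5, 2))  // 0x2a
	fmt.Printf("sum(7):                    %#x\n", tag(7, 2))  // 0x3a
	fmt.Printf("histogram(9):              %#x\n", tag(9, 2))  // 0x4a
	fmt.Printf("exponential_histogram(10): %#x\n", tag(10, 2)) // 0x52
	fmt.Printf("summary(11):               %#x\n", tag(11, 2)) // 0x5a
}
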
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Histogram != nil { - { - size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} -func (m *Metric_ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ExponentialHistogram != nil { - { - size, err := m.ExponentialHistogram.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - return len(dAtA) - i, nil -} -func (m *Metric_Summary) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Summary != nil { - { - size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - return len(dAtA) - i, nil -} -func (m *Gauge) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Gauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Sum) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.IsMonotonic { - i-- - if m.IsMonotonic { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.AggregationTemporality != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x10 - } - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Histogram) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - 
var l int - _ = l - if m.AggregationTemporality != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x10 - } - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExponentialHistogram) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AggregationTemporality != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x10 - } - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Summary) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Summary) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NumberDataPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NumberDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NumberDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Flags != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Flags)) - i-- - dAtA[i] = 0x40 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.TimeUnixNano != 0 { - 
i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - return len(dAtA) - i, nil -} - -func (m *NumberDataPoint_AsDouble) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NumberDataPoint_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble)))) - i-- - dAtA[i] = 0x21 - return len(dAtA) - i, nil -} -func (m *NumberDataPoint_AsInt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NumberDataPoint_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt)) - i-- - dAtA[i] = 0x31 - return len(dAtA) - i, nil -} -func (m *HistogramDataPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HistogramDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Max_ != nil { - { - size := m.Max_.Size() - i -= size - if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Min_ != nil { - { - size := m.Min_.Size() - i -= size - if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Flags != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Flags)) - i-- - dAtA[i] = 0x50 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - } - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if len(m.ExplicitBounds) > 0 { - for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- { - f8 := math.Float64bits(float64(m.ExplicitBounds[iNdEx])) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f8)) - } - i = encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8)) - i-- - dAtA[i] = 0x3a - } - if len(m.BucketCounts) > 0 { - for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx])) - } - i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8)) - i-- - dAtA[i] = 0x32 - } - if m.Sum_ != nil { - { - size := m.Sum_.Size() - i -= size - if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Count != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) - i-- - dAtA[i] = 0x21 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if 
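
Reviewer note: the timestamp and as_double/as_int branches above use wire type 1 (64-bit): eight little-endian bytes, with floats routed through math.Float64bits. For example, as_double is field 4 of NumberDataPoint, so its key is 4<<3|1 = 0x21. A minimal standalone sketch:

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	buf := make([]byte, 9)
	i := len(buf)

	// as_double = 1.5: eight little-endian payload bytes, then key 0x21,
	// matching the NumberDataPoint_AsDouble marshaler above.
	i -= 8
	binary.LittleEndian.PutUint64(buf[i:], math.Float64bits(1.5))
	i--
	buf[i] = 0x21

	fmt.Printf("%% x\n", buf[i:]) // 21 00 00 00 00 00 00 f8 3f
}
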
m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - return len(dAtA) - i, nil -} - -func (m *HistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) - i-- - dAtA[i] = 0x29 - return len(dAtA) - i, nil -} -func (m *HistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min)))) - i-- - dAtA[i] = 0x59 - return len(dAtA) - i, nil -} -func (m *HistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max)))) - i-- - dAtA[i] = 0x61 - return len(dAtA) - i, nil -} -func (m *ExponentialHistogramDataPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExponentialHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExponentialHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ZeroThreshold != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) - i-- - dAtA[i] = 0x71 - } - if m.Max_ != nil { - { - size := m.Max_.Size() - i -= size - if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Min_ != nil { - { - size := m.Min_.Size() - i -= size - if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - } - if m.Flags != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Flags)) - i-- - dAtA[i] = 0x50 - } - { - size, err := m.Negative.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - { - size, err := m.Positive.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if m.ZeroCount != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ZeroCount)) - i-- - dAtA[i] = 0x39 - } - if m.Scale != 0 { - i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Scale)<<1)^uint32((m.Scale>>31)))) - i-- - dAtA[i] = 0x30 - } - if m.Sum_ != nil { - { - size := m.Sum_.Size() - i -= size - if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Count != 0 { - i -= 8 - 
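
Reviewer note: Scale above is a sint32, so the generated code zig-zags it ((uint32(v)<<1)^uint32(v>>31)) before varint encoding, which keeps small negative values at one byte on the wire. A standalone sketch with its inverse; 0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4:

package main

import "fmt"

func zigzag32(v int32) uint32   { return (uint32(v) << 1) ^ uint32(v>>31) }
func unzigzag32(u uint32) int32 { return int32(u>>1) ^ -int32(u&1) }

func main() {
	for _, v := range []int32{0, -1, 1, -2, 2, -64} {
		z := zigzag32(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, z, unzigzag32(z))
	}
}
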
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) - i-- - dAtA[i] = 0x21 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExponentialHistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExponentialHistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) - i-- - dAtA[i] = 0x29 - return len(dAtA) - i, nil -} -func (m *ExponentialHistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExponentialHistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min)))) - i-- - dAtA[i] = 0x61 - return len(dAtA) - i, nil -} -func (m *ExponentialHistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExponentialHistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max)))) - i-- - dAtA[i] = 0x69 - return len(dAtA) - i, nil -} -func (m *ExponentialHistogramDataPoint_Buckets) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExponentialHistogramDataPoint_Buckets) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExponentialHistogramDataPoint_Buckets) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.BucketCounts) > 0 { - dAtA12 := make([]byte, len(m.BucketCounts)*10) - var j11 int - for _, num := range m.BucketCounts { - for num >= 1<<7 { - dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j11++ - } - dAtA12[j11] = uint8(num) - j11++ - } - i -= j11 - copy(dAtA[i:], dAtA12[:j11]) - i = encodeVarintMetrics(dAtA, i, uint64(j11)) - i-- - dAtA[i] = 0x12 - } - if m.Offset != 0 { - i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *SummaryDataPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SummaryDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SummaryDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
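
Reviewer note: Buckets.BucketCounts above is a packed repeated varint field: values are varint-encoded into a scratch buffer sized at ten bytes per uint64 (the varint worst case), then emitted under a single length-delimited key (0x12). A standalone sketch; the single-byte length written below assumes the payload stays under 128 bytes:

package main

import "fmt"

// packVarints varint-encodes each value back to back, as the dAtA12/j11
// scratch-buffer loop does in the generated Buckets marshaler above.
func packVarints(nums []uint64) []byte {
	scratch := make([]byte, 0, len(nums)*10)
	for _, num := range nums {
		for num >= 1<<7 {
			scratch = append(scratch, byte(num&0x7f|0x80))
			num >>= 7
		}
		scratch = append(scratch, byte(num))
	}
	return scratch
}

func main() {
	payload := packVarints([]uint64{1, 300, 0})
	// key 0x12 = field 2, wire type 2; length fits one varint byte here
	out := append([]byte{0x12, byte(len(payload))}, payload...)
	fmt.Printf("%% x\n", out) // 12 04 01 ac 02 00
}
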
m.Flags != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Flags)) - i-- - dAtA[i] = 0x40 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.QuantileValues) > 0 { - for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.QuantileValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if m.Sum != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) - i-- - dAtA[i] = 0x29 - } - if m.Count != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) - i-- - dAtA[i] = 0x21 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - return len(dAtA) - i, nil -} - -func (m *SummaryDataPoint_ValueAtQuantile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SummaryDataPoint_ValueAtQuantile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SummaryDataPoint_ValueAtQuantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x11 - } - if m.Quantile != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *Exemplar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.FilteredAttributes) > 0 { - for iNdEx := len(m.FilteredAttributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FilteredAttributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.TimeUnixNano != 0 { - i -= 8 - 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - return len(dAtA) - i, nil -} - -func (m *Exemplar_AsDouble) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Exemplar_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble)))) - i-- - dAtA[i] = 0x19 - return len(dAtA) - i, nil -} -func (m *Exemplar_AsInt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Exemplar_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt)) - i-- - dAtA[i] = 0x31 - return len(dAtA) - i, nil -} -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovMetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MetricsData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceMetrics) > 0 { - for _, e := range m.ResourceMetrics { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *ResourceMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovMetrics(uint64(l)) - if len(m.ScopeMetrics) > 0 { - for _, e := range m.ScopeMetrics { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if len(m.DeprecatedScopeMetrics) > 0 { - for _, e := range m.DeprecatedScopeMetrics { - l = e.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *ScopeMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Scope.Size() - n += 1 + l + sovMetrics(uint64(l)) - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} - -func (m *Metric) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(m.Unit) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Data != nil { - n += m.Data.Size() - } - if len(m.Metadata) > 0 { - for _, e := range m.Metadata { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *Metric_Gauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Gauge != nil { - l = m.Gauge.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_Sum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Sum != nil { - l = m.Sum.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_Histogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Histogram != nil { - l = m.Histogram.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_ExponentialHistogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExponentialHistogram != nil { - l = m.ExponentialHistogram.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m 
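
Reviewer note: in the Size() methods above, each length-delimited field costs key + length varint + payload, i.e. 1 + l + sovMetrics(l) for fields 1..15. DeprecatedScopeMetrics is field 1000, whose key needs two varint bytes (the 0xc2 0x3e pair in the marshaler), hence the 2 + l + sovMetrics(l) line. A standalone check of key widths; sov and tagLen are illustrative names:

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovMetrics: the byte length of x as a varint.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// tagLen is the varint length of the key fieldNum<<3|wireType.
func tagLen(fieldNum int) int { return sov(uint64(fieldNum) << 3) }

func main() {
	fmt.Println(tagLen(1))    // 1
	fmt.Println(tagLen(15))   // 1 -- last field number with a one-byte key
	fmt.Println(tagLen(16))   // 2
	fmt.Println(tagLen(1000)) // 2 (key 8002 encodes as 0xc2 0x3e)
}
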
*Metric_Summary) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Summary != nil { - l = m.Summary.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Gauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *Sum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.AggregationTemporality != 0 { - n += 1 + sovMetrics(uint64(m.AggregationTemporality)) - } - if m.IsMonotonic { - n += 2 - } - return n -} - -func (m *Histogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.AggregationTemporality != 0 { - n += 1 + sovMetrics(uint64(m.AggregationTemporality)) - } - return n -} - -func (m *ExponentialHistogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.AggregationTemporality != 0 { - n += 1 + sovMetrics(uint64(m.AggregationTemporality)) - } - return n -} - -func (m *Summary) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *NumberDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Value != nil { - n += m.Value.Size() - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Flags != 0 { - n += 1 + sovMetrics(uint64(m.Flags)) - } - return n -} - -func (m *NumberDataPoint_AsDouble) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *NumberDataPoint_AsInt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *HistogramDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Count != 0 { - n += 9 - } - if m.Sum_ != nil { - n += m.Sum_.Size() - } - if len(m.BucketCounts) > 0 { - n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8 - } - if len(m.ExplicitBounds) > 0 { - n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8 - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Flags != 0 { - n += 1 + sovMetrics(uint64(m.Flags)) - } - if m.Min_ != nil { - n += m.Min_.Size() - } - if m.Max_ != nil { - n += m.Max_.Size() - } - return n -} - -func (m *HistogramDataPoint_Sum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *HistogramDataPoint_Min) Size() (n int) { - if m == 
nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *HistogramDataPoint_Max) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *ExponentialHistogramDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Count != 0 { - n += 9 - } - if m.Sum_ != nil { - n += m.Sum_.Size() - } - if m.Scale != 0 { - n += 1 + sozMetrics(uint64(m.Scale)) - } - if m.ZeroCount != 0 { - n += 9 - } - l = m.Positive.Size() - n += 1 + l + sovMetrics(uint64(l)) - l = m.Negative.Size() - n += 1 + l + sovMetrics(uint64(l)) - if m.Flags != 0 { - n += 1 + sovMetrics(uint64(m.Flags)) - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Min_ != nil { - n += m.Min_.Size() - } - if m.Max_ != nil { - n += m.Max_.Size() - } - if m.ZeroThreshold != 0 { - n += 9 - } - return n -} - -func (m *ExponentialHistogramDataPoint_Sum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *ExponentialHistogramDataPoint_Min) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *ExponentialHistogramDataPoint_Max) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *ExponentialHistogramDataPoint_Buckets) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Offset != 0 { - n += 1 + sozMetrics(uint64(m.Offset)) - } - if len(m.BucketCounts) > 0 { - l = 0 - for _, e := range m.BucketCounts { - l += sovMetrics(uint64(e)) - } - n += 1 + sovMetrics(uint64(l)) + l - } - return n -} - -func (m *SummaryDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Count != 0 { - n += 9 - } - if m.Sum != 0 { - n += 9 - } - if len(m.QuantileValues) > 0 { - for _, e := range m.QuantileValues { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Flags != 0 { - n += 1 + sovMetrics(uint64(m.Flags)) - } - return n -} - -func (m *SummaryDataPoint_ValueAtQuantile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Quantile != 0 { - n += 9 - } - if m.Value != 0 { - n += 9 - } - return n -} - -func (m *Exemplar) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Value != nil { - n += m.Value.Size() - } - l = m.SpanId.Size() - n += 1 + l + sovMetrics(uint64(l)) - l = m.TraceId.Size() - n += 1 + l + sovMetrics(uint64(l)) - if len(m.FilteredAttributes) > 0 { - for _, e := range m.FilteredAttributes { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *Exemplar_AsDouble) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *Exemplar_AsInt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} - -func sovMetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} 
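
Reviewer note: sovMetrics above computes a varint's byte length in closed form: seven payload bits per byte gives ceil(bitlen(x)/7), and x|1 makes zero cost one byte; sozMetrics is the same after zig-zagging. A standalone check against a counting loop:

package main

import (
	"fmt"
	"math/bits"
)

func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// naive counts 7-bit groups the way an encoder would emit them.
func naive(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1 << 62, ^uint64(0)} {
		if sov(x) != naive(x) {
			panic("mismatch")
		}
		fmt.Printf("sov(%d) = %d\n", x, sov(x))
	}
}
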
-func (m *MetricsData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{}) - if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
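
Reviewer note: the Unmarshal above is the standard hand-rolled decode loop: read a varint key, split it into field number (wire>>3) and wire type (wire&0x7), reject group wire types, and bounds-check every computed index before slicing. A standalone reduction of that loop, handling only length-delimited fields; readVarint is an illustrative name:

package main

import (
	"errors"
	"fmt"
)

func readVarint(b []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflow")
		}
		if i >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[i]
		i++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// field 1, wire type 2, payload "abc" -- the shape MetricsData expects
	msg := []byte{0x0a, 0x03, 'a', 'b', 'c'}
	for i := 0; i < len(msg); {
		key, next, err := readVarint(msg, i)
		if err != nil {
			panic(err)
		}
		fieldNum, wireType := key>>3, key&0x7
		fmt.Printf("field=%d wire=%d\n", fieldNum, wireType)
		if wireType != 2 {
			panic("sketch only handles length-delimited fields")
		}
		l, next, err := readVarint(msg, next)
		if err != nil || next+int(l) > len(msg) {
			panic("bad length")
		}
		fmt.Printf("payload=%q\n", msg[next:next+int(l)])
		i = next + int(l)
	}
}
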
return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScopeMetrics = append(m.ScopeMetrics, &ScopeMetrics{}) - if err := m.ScopeMetrics[len(m.ScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 1000: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedScopeMetrics = append(m.DeprecatedScopeMetrics, &ScopeMetrics{}) - if err := m.DeprecatedScopeMetrics[len(m.DeprecatedScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScopeMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopeMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScopeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
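
Reviewer note: field numbers the decoder does not recognize (anything outside the cases above) fall through to skipMetrics, which advances past the field purely from its wire type; skipMetrics itself is defined elsewhere in this file, so the following is only an illustrative reduction of that idea:

package main

import (
	"errors"
	"fmt"
)

// skip returns how many bytes after the key a field of the given wire type
// occupies. Wire types: 0 varint, 1 fixed64, 2 length-delimited, 5 fixed32.
func skip(b []byte, wireType uint64) (int, error) {
	switch wireType {
	case 0: // varint: consume until the continuation bit clears
		for n := 0; n < len(b); n++ {
			if b[n] < 0x80 {
				return n + 1, nil
			}
		}
		return 0, errors.New("truncated varint")
	case 1:
		return 8, nil
	case 2: // length prefix, then that many bytes
		var l, n int
		for shift := uint(0); ; shift += 7 {
			if n >= len(b) {
				return 0, errors.New("truncated length")
			}
			c := b[n]
			n++
			l |= int(c&0x7F) << shift
			if c < 0x80 {
				break
			}
		}
		return n + l, nil
	case 5:
		return 4, nil
	}
	return 0, errors.New("unsupported wire type")
}

func main() {
	n, err := skip([]byte{0x03, 'x', 'y', 'z'}, 2)
	fmt.Println(n, err) // 4 <nil>
}
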
postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, &Metric{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { 
- return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Unit = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Gauge{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_Gauge{v} - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Sum{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_Sum{v} - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Histogram{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_Histogram{v} - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ExponentialHistogram{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_ExponentialHistogram{v} - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Summary{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_Summary{v} - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metadata = append(m.Metadata, v11.KeyValue{}) - if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Gauge) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Gauge: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataPoints = append(m.DataPoints, &NumberDataPoint{}) - if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { 
-func (m *Gauge) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Gauge: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
-			if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Sum) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Sum: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Sum: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
-			if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
-			}
-			m.AggregationTemporality = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.IsMonotonic = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Histogram) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.DataPoints = append(m.DataPoints, &HistogramDataPoint{})
-			if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
-			}
-			m.AggregationTemporality = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ExponentialHistogram) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ExponentialHistogram: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ExponentialHistogram: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.DataPoints = append(m.DataPoints, &ExponentialHistogramDataPoint{})
-			if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
-			}
-			m.AggregationTemporality = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Summary) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Summary: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.DataPoints = append(m.DataPoints, &SummaryDataPoint{})
-			if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
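The timestamp and double-valued fields that follow (StartTimeUnixNano, TimeUnixNano, AsDouble, Sum, Min, Max) are wire type 1: a fixed eight bytes, little-endian, with doubles carried as their raw IEEE-754 bit pattern. A minimal round-trip sketch of that decode step, using only the standard library (not part of the generated file):

	package main

	import (
		"encoding/binary"
		"fmt"
		"math"
	)

	func main() {
		// Encode a float64 as a fixed64 field value...
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, math.Float64bits(10.239))

		// ...and decode it the way the Unmarshal cases do: read the eight
		// little-endian bytes, then reinterpret the bits as a float64.
		bits := binary.LittleEndian.Uint64(buf)
		fmt.Println(math.Float64frombits(bits)) // 10.239
	}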
-func (m *NumberDataPoint) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: NumberDataPoint: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: NumberDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
-			}
-			m.StartTimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 3:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
-			}
-			m.TimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 4:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Value = &NumberDataPoint_AsDouble{float64(math.Float64frombits(v))}
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Exemplars = append(m.Exemplars, Exemplar{})
-			if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 6:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
-			}
-			var v int64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Value = &NumberDataPoint_AsInt{v}
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Attributes = append(m.Attributes, v11.KeyValue{})
-			if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
-			}
-			m.Flags = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Flags |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
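NumberDataPoint's value is a proto `oneof` (AsDouble in field 4, AsInt in field 6): gogoproto models it as an interface-typed field plus one wrapper struct per variant, and decoding a variant replaces the whole field, so the last variant on the wire wins. A reduced sketch of the pattern with hypothetical names (isValue, asDouble, asInt, point are illustrative, not the generated types):

	package main

	import "fmt"

	// isValue is the marker interface every oneof variant wrapper implements,
	// mirroring how the generated code models NumberDataPoint's Value oneof.
	type isValue interface{ isValue() }

	type asDouble struct{ D float64 }
	type asInt struct{ I int64 }

	func (asDouble) isValue() {}
	func (asInt) isValue()    {}

	type point struct{ Value isValue }

	func main() {
		p := point{}
		p.Value = asDouble{D: 1.5} // decoding field 4 would set this variant
		p.Value = asInt{I: 42}     // decoding field 6 overwrites it: last one wins

		switch v := p.Value.(type) {
		case asDouble:
			fmt.Println("double:", v.D)
		case asInt:
			fmt.Println("int:", v.I)
		}
	}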
-func (m *HistogramDataPoint) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HistogramDataPoint: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
-			}
-			m.StartTimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 3:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
-			}
-			m.TimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 4:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
-			}
-			m.Count = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 5:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Sum_ = &HistogramDataPoint_Sum{float64(math.Float64frombits(v))}
-		case 6:
-			if wireType == 1 {
-				var v uint64
-				if (iNdEx + 8) > l {
-					return io.ErrUnexpectedEOF
-				}
-				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-				iNdEx += 8
-				m.BucketCounts = append(m.BucketCounts, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowMetrics
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthMetrics
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthMetrics
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				elementCount = packedLen / 8
-				if elementCount != 0 && len(m.BucketCounts) == 0 {
-					m.BucketCounts = make([]uint64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					if (iNdEx + 8) > l {
-						return io.ErrUnexpectedEOF
-					}
-					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-					iNdEx += 8
-					m.BucketCounts = append(m.BucketCounts, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
-			}
-		case 7:
-			if wireType == 1 {
-				var v uint64
-				if (iNdEx + 8) > l {
-					return io.ErrUnexpectedEOF
-				}
-				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-				iNdEx += 8
-				v2 := float64(math.Float64frombits(v))
-				m.ExplicitBounds = append(m.ExplicitBounds, v2)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowMetrics
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthMetrics
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthMetrics
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				elementCount = packedLen / 8
-				if elementCount != 0 && len(m.ExplicitBounds) == 0 {
-					m.ExplicitBounds = make([]float64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					if (iNdEx + 8) > l {
-						return io.ErrUnexpectedEOF
-					}
-					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-					iNdEx += 8
-					v2 := float64(math.Float64frombits(v))
-					m.ExplicitBounds = append(m.ExplicitBounds, v2)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
-			}
-		case 8:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Exemplars = append(m.Exemplars, Exemplar{})
-			if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 9:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Attributes = append(m.Attributes, v11.KeyValue{})
-			if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 10:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
-			}
-			m.Flags = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Flags |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 11:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Min_ = &HistogramDataPoint_Min{float64(math.Float64frombits(v))}
-		case 12:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Max_ = &HistogramDataPoint_Max{float64(math.Float64frombits(v))}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
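The BucketCounts and ExplicitBounds cases accept both encodings of a repeated fixed64 field: wire type 1 carries a single element, while wire type 2 carries a packed run whose element count is simply the byte length divided by eight, which the generated code uses to pre-size the slice. A self-contained sketch of the packed branch (decodePackedFixed64 is a hypothetical helper):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// decodePackedFixed64 decodes a packed run of fixed64 values, pre-sizing
	// the slice from the byte length as the BucketCounts case does.
	func decodePackedFixed64(payload []byte) ([]uint64, error) {
		if len(payload)%8 != 0 {
			return nil, fmt.Errorf("packed fixed64 payload length %d not a multiple of 8", len(payload))
		}
		out := make([]uint64, 0, len(payload)/8)
		for i := 0; i < len(payload); i += 8 {
			out = append(out, binary.LittleEndian.Uint64(payload[i:]))
		}
		return out, nil
	}

	func main() {
		payload := make([]byte, 16)
		binary.LittleEndian.PutUint64(payload[0:], 7)
		binary.LittleEndian.PutUint64(payload[8:], 11)
		fmt.Println(decodePackedFixed64(payload)) // [7 11] <nil>
	}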
-func (m *ExponentialHistogramDataPoint) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ExponentialHistogramDataPoint: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ExponentialHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Attributes = append(m.Attributes, v11.KeyValue{})
-			if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
-			}
-			m.StartTimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 3:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
-			}
-			m.TimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 4:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
-			}
-			m.Count = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 5:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Sum_ = &ExponentialHistogramDataPoint_Sum{float64(math.Float64frombits(v))}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
-			}
-			var v int32
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
-			m.Scale = v
-		case 7:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
-			}
-			m.ZeroCount = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.ZeroCount = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 8:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Positive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 9:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Negative.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 10:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
-			}
-			m.Flags = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Flags |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 11:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Exemplars = append(m.Exemplars, Exemplar{})
-			if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 12:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Min_ = &ExponentialHistogramDataPoint_Min{float64(math.Float64frombits(v))}
-		case 13:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Max_ = &ExponentialHistogramDataPoint_Max{float64(math.Float64frombits(v))}
-		case 14:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.ZeroThreshold = float64(math.Float64frombits(v))
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ExponentialHistogramDataPoint_Buckets) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Buckets: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Buckets: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
-			}
-			var v int32
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
-			m.Offset = v
-		case 2:
-			if wireType == 0 {
-				var v uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowMetrics
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint64(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.BucketCounts = append(m.BucketCounts, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowMetrics
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthMetrics
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthMetrics
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.BucketCounts) == 0 {
-					m.BucketCounts = make([]uint64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowMetrics
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint64(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.BucketCounts = append(m.BucketCounts, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
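The Scale and Offset cases above end with `v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))`, which undoes protobuf's zigzag encoding for sint32 fields: signed values are interleaved onto small unsigned varints (0, -1, 1, -2, ... map to 0, 1, 2, 3, ...). A small sketch of both directions, with hypothetical helper names:

	package main

	import "fmt"

	// zigzagEncode32 interleaves signed ints onto unsigned values so that
	// small magnitudes encode as short varints regardless of sign.
	func zigzagEncode32(v int32) uint32 { return uint32((v << 1) ^ (v >> 31)) }

	// zigzagDecode32 is the same operation the Scale/Offset cases apply
	// after reading the raw varint.
	func zigzagDecode32(u uint32) int32 { return int32((u >> 1) ^ -(u & 1)) }

	func main() {
		for _, v := range []int32{0, -1, 1, -2, 2, -64} {
			u := zigzagEncode32(v)
			fmt.Printf("%d -> %d -> %d\n", v, u, zigzagDecode32(u))
		}
	}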
-func (m *SummaryDataPoint) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SummaryDataPoint: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
-			}
-			m.StartTimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 3:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
-			}
-			m.TimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 4:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
-			}
-			m.Count = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 5:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Sum = float64(math.Float64frombits(v))
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.QuantileValues = append(m.QuantileValues, &SummaryDataPoint_ValueAtQuantile{})
-			if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Attributes = append(m.Attributes, v11.KeyValue{})
-			if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
-			}
-			m.Flags = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Flags |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SummaryDataPoint_ValueAtQuantile) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Quantile = float64(math.Float64frombits(v))
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Value = float64(math.Float64frombits(v))
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Exemplar) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
-			}
-			m.TimeUnixNano = 0
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-		case 3:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Value = &Exemplar_AsDouble{float64(math.Float64frombits(v))}
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 6:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
-			}
-			var v int64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Value = &Exemplar_AsInt{v}
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.FilteredAttributes = append(m.FilteredAttributes, v11.KeyValue{})
-			if err := m.FilteredAttributes[len(m.FilteredAttributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipMetrics(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthMetrics
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipMetrics(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	depth := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowMetrics
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-		case 1:
-			iNdEx += 8
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowMetrics
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthMetrics
-			}
-			iNdEx += length
-		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupMetrics
-			}
-			depth--
-		case 5:
-			iNdEx += 4
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthMetrics
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
-	}
-	return 0, io.ErrUnexpectedEOF
-}
-
-var (
-	ErrInvalidLengthMetrics        = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowMetrics          = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
-)
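skipMetrics advances over an unknown field using only the wire type carried in its tag, tracking start-group/end-group nesting with a depth counter. A condensed sketch of the same dispatch for the non-group wire types (skipField is a hypothetical helper; it omits the 64-bit overflow guard the generated code keeps):

	package main

	import (
		"errors"
		"fmt"
	)

	// skipField returns how many bytes the value of a field with the given
	// wire type occupies: varint (0) scans to the terminating byte, fixed64
	// (1) and fixed32 (5) are fixed width, and length-delimited (2) reads
	// its own length prefix first.
	func skipField(buf []byte, wireType int) (int, error) {
		switch wireType {
		case 0: // varint: consume until a byte with the high bit clear
			for i, b := range buf {
				if b < 0x80 {
					return i + 1, nil
				}
			}
			return 0, errors.New("truncated varint")
		case 1:
			return 8, nil
		case 5:
			return 4, nil
		case 2: // length-delimited: a varint length, then that many bytes
			var length, i int
			for shift := uint(0); ; shift += 7 {
				if i >= len(buf) {
					return 0, errors.New("truncated length")
				}
				b := buf[i]
				i++
				length |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			return i + length, nil
		default: // groups (3/4) need depth tracking, as skipMetrics does
			return 0, fmt.Errorf("unsupported wire type %d", wireType)
		}
	}

	func main() {
		n, err := skipField([]byte{0xAC, 0x02}, 0)
		fmt.Println(n, err) // 2 <nil>
	}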
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go
deleted file mode 100644
index e1d24c4eeda..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go
+++ /dev/null
@@ -1,5259 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/profiles/v1development/profiles.proto
-
-package v1development
-
-import (
-	encoding_binary "encoding/binary"
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/gogo/protobuf/proto"
-
-	go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
-	v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-	v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-// Specifies the method of aggregating metric values, either DELTA (change since last report)
-// or CUMULATIVE (total since a fixed start time).
-type AggregationTemporality int32
-
-const (
-	// UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
-	AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
-	//* DELTA is an AggregationTemporality for a profiler which reports
-	//changes since last report time. Successive metrics contain aggregation of
-	//values from continuous and non-overlapping intervals.
-	//
-	//The values for a DELTA metric are based only on the time interval
-	//associated with one measurement cycle. There is no dependency on
-	//previous measurements like is the case for CUMULATIVE metrics.
-	//
-	//For example, consider a system measuring the number of requests that
-	//it receives and reports the sum of these requests every second as a
-	//DELTA metric:
-	//
-	//1. The system starts receiving at time=t_0.
-	//2. A request is received, the system measures 1 request.
-	//3. A request is received, the system measures 1 request.
-	//4. A request is received, the system measures 1 request.
-	//5. The 1 second collection cycle ends. A metric is exported for the
-	//number of requests received over the interval of time t_0 to
-	//t_0+1 with a value of 3.
-	//6. A request is received, the system measures 1 request.
-	//7. A request is received, the system measures 1 request.
-	//8. The 1 second collection cycle ends. A metric is exported for the
-	//number of requests received over the interval of time t_0+1 to
-	//t_0+2 with a value of 2.
-	AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
-	//* CUMULATIVE is an AggregationTemporality for a profiler which
-	//reports changes since a fixed start time. This means that current values
-	//of a CUMULATIVE metric depend on all previous measurements since the
-	//start time. Because of this, the sender is required to retain this state
-	//in some form. If this state is lost or invalidated, the CUMULATIVE metric
-	//values MUST be reset and a new fixed start time following the last
-	//reported measurement time sent MUST be used.
-	//
-	//For example, consider a system measuring the number of requests that
-	//it receives and reports the sum of these requests every second as a
-	//CUMULATIVE metric:
-	//
-	//1. The system starts receiving at time=t_0.
-	//2. A request is received, the system measures 1 request.
-	//3. A request is received, the system measures 1 request.
-	//4. A request is received, the system measures 1 request.
-	//5. The 1 second collection cycle ends. A metric is exported for the
-	//number of requests received over the interval of time t_0 to
-	//t_0+1 with a value of 3.
-	//6. A request is received, the system measures 1 request.
-	//7. A request is received, the system measures 1 request.
-	//8. The 1 second collection cycle ends. A metric is exported for the
-	//number of requests received over the interval of time t_0 to
-	//t_0+2 with a value of 5.
-	//9. The system experiences a fault and loses state.
-	//10. The system recovers and resumes receiving at time=t_1.
-	//11. A request is received, the system measures 1 request.
-	//12. The 1 second collection cycle ends. A metric is exported for the
-	//number of requests received over the interval of time t_1 to
-	//t_1+1 with a value of 1.
-	//
-	//Note: Even though, when reporting changes since last report time, using
-	//CUMULATIVE is valid, it is not recommended.
-	AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
-)
-
-var AggregationTemporality_name = map[int32]string{
-	0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
-	1: "AGGREGATION_TEMPORALITY_DELTA",
-	2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
-}
-
-var AggregationTemporality_value = map[string]int32{
-	"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
-	"AGGREGATION_TEMPORALITY_DELTA":       1,
-	"AGGREGATION_TEMPORALITY_CUMULATIVE":  2,
-}
-
-func (x AggregationTemporality) String() string {
-	return proto.EnumName(AggregationTemporality_name, int32(x))
-}
-
-func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_ddd0cf081a2fe76f, []int{0}
-}
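The practical difference the enum's doc comment walks through is whether successive values stand alone (DELTA) or accumulate from a fixed start time (CUMULATIVE); re-deriving deltas from a cumulative series is a running subtraction, with a drop in the value signalling a state-loss reset. A small illustrative sketch (cumulativeToDelta is hypothetical, not a collector API):

	package main

	import "fmt"

	// cumulativeToDelta re-derives per-interval (DELTA) values from a
	// CUMULATIVE series. A value lower than its predecessor signals that
	// the producer lost state and restarted, so it is taken as-is.
	func cumulativeToDelta(cumulative []int64) []int64 {
		deltas := make([]int64, 0, len(cumulative))
		var prev int64
		for _, v := range cumulative {
			if v < prev { // reset detected: new start time, fresh accumulation
				prev = 0
			}
			deltas = append(deltas, v-prev)
			prev = v
		}
		return deltas
	}

	func main() {
		// The doc comment's request counter: 3 by t_0+1, 5 by t_0+2,
		// then a fault resets the counter and one more request arrives.
		fmt.Println(cumulativeToDelta([]int64{3, 5, 1})) // [3 2 1]
	}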
-// ProfilesDictionary represents the profiles data shared across the
-// entire message being sent.
-//
-// Note that all fields in this message MUST have a zero value encoded as the first element.
-// This allows for _index fields pointing into the dictionary to use a 0 pointer value
-// to indicate 'null' / 'not set'. Unless otherwise defined, a 'zero value' message value
-// is one with all default field values, so as to minimize wire encoded size.
-type ProfilesDictionary struct {
-	// Mappings from address ranges to the image/binary/library mapped
-	// into that address range referenced by locations via Location.mapping_index.
-	MappingTable []*Mapping `protobuf:"bytes,1,rep,name=mapping_table,json=mappingTable,proto3" json:"mapping_table,omitempty"`
-	// Locations referenced by samples via Stack.location_indices.
-	LocationTable []*Location `protobuf:"bytes,2,rep,name=location_table,json=locationTable,proto3" json:"location_table,omitempty"`
-	// Functions referenced by locations via Line.function_index.
-	FunctionTable []*Function `protobuf:"bytes,3,rep,name=function_table,json=functionTable,proto3" json:"function_table,omitempty"`
-	// Links referenced by samples via Sample.link_index.
-	LinkTable []*Link `protobuf:"bytes,4,rep,name=link_table,json=linkTable,proto3" json:"link_table,omitempty"`
-	// A common table for strings referenced by various messages.
-	// string_table[0] must always be "".
-	StringTable []string `protobuf:"bytes,5,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"`
-	// A common table for attributes referenced by various messages.
-	// It is a collection of key/value pairs. Note, global attributes
-	// like server name can be set using the resource API. Examples of attributes:
-	//
-	//     "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
-	//     "/http/server_latency": 300
-	//     "abc.com/myattribute": true
-	//     "abc.com/score": 10.239
-	//
-	// The attribute values SHOULD NOT contain empty values.
-	// The attribute values SHOULD NOT contain bytes values.
-	// The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values,
-	// double values.
-	// The attribute values SHOULD NOT contain kvlist values.
-	// The behavior of software that receives attributes containing such values can be unpredictable.
-	// These restrictions can change in a minor release.
-	// The restrictions take origin from the OpenTelemetry specification:
-	// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
-	AttributeTable []*KeyValueAndUnit `protobuf:"bytes,6,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table,omitempty"`
-	// Stacks referenced by samples via Sample.stack_index.
-	StackTable []*Stack `protobuf:"bytes,7,rep,name=stack_table,json=stackTable,proto3" json:"stack_table,omitempty"`
-}
-
-func (m *ProfilesDictionary) Reset()         { *m = ProfilesDictionary{} }
-func (m *ProfilesDictionary) String() string { return proto.CompactTextString(m) }
-func (*ProfilesDictionary) ProtoMessage()    {}
-func (*ProfilesDictionary) Descriptor() ([]byte, []int) {
-	return fileDescriptor_ddd0cf081a2fe76f, []int{0}
-}
-func (m *ProfilesDictionary) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ProfilesDictionary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ProfilesDictionary.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ProfilesDictionary) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ProfilesDictionary.Merge(m, src)
-}
-func (m *ProfilesDictionary) XXX_Size() int {
-	return m.Size()
-}
-func (m *ProfilesDictionary) XXX_DiscardUnknown() {
-	xxx_messageInfo_ProfilesDictionary.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProfilesDictionary proto.InternalMessageInfo
-
-func (m *ProfilesDictionary) GetMappingTable() []*Mapping {
-	if m != nil {
-		return m.MappingTable
-	}
-	return nil
-}
-
-func (m *ProfilesDictionary) GetLocationTable() []*Location {
-	if m != nil {
-		return m.LocationTable
-	}
-	return nil
-}
-
-func (m *ProfilesDictionary) GetFunctionTable() []*Function {
-	if m != nil {
-		return m.FunctionTable
-	}
-	return nil
-}
-
-func (m *ProfilesDictionary) GetLinkTable() []*Link {
-	if m != nil {
-		return m.LinkTable
-	}
-	return nil
-}
-
-func (m *ProfilesDictionary) GetStringTable() []string {
-	if m != nil {
-		return m.StringTable
-	}
-	return nil
-}
-
-func (m *ProfilesDictionary) GetAttributeTable() []*KeyValueAndUnit {
-	if m != nil {
-		return m.AttributeTable
-	}
-	return nil
-}
-
-func (m *ProfilesDictionary) GetStackTable() []*Stack {
-	if m != nil {
-		return m.StackTable
-	}
-	return nil
-}
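ProfilesDictionary's doc comment pins down the indexing convention: every table encodes a zero value as element 0, so any _index field can use 0 to mean 'null' / 'not set'. A tiny sketch of what consuming code relying on that convention looks like (the dictionary type and lookup helper are hypothetical, not the generated API):

	package main

	import "fmt"

	// dictionary mimics ProfilesDictionary's convention: element 0 of every
	// table is the zero value, so an index of 0 reads back as "unset".
	type dictionary struct {
		stringTable []string
	}

	func (d *dictionary) lookup(idx int32) (string, bool) {
		if idx <= 0 || int(idx) >= len(d.stringTable) {
			return "", false // 0 is the null sentinel; anything else must be in range
		}
		return d.stringTable[idx], true
	}

	func main() {
		d := dictionary{stringTable: []string{"", "cpu", "nanoseconds"}}
		if s, ok := d.lookup(1); ok {
			fmt.Println(s) // cpu
		}
		if _, ok := d.lookup(0); !ok {
			fmt.Println("index 0 means not set")
		}
	}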
-// ProfilesData represents the profiles data that can be stored in persistent storage,
-// OR can be embedded by other protocols that transfer OTLP profiles data but do not
-// implement the OTLP protocol.
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type ProfilesData struct {
-	// An array of ResourceProfiles.
-	// For data coming from an SDK profiler, this array will typically contain one
-	// element. Host-level profilers will usually create one ResourceProfile per
-	// container, as well as one additional ResourceProfile grouping all samples
-	// from non-containerized processes.
-	// Other resource groupings are possible as well and clarified via
-	// Resource.attributes and semantic conventions.
-	// Tools that visualize profiles should prefer displaying
-	// resources_profiles[0].scope_profiles[0].profiles[0] by default.
-	ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"`
-	// One instance of ProfilesDictionary
-	Dictionary ProfilesDictionary `protobuf:"bytes,2,opt,name=dictionary,proto3" json:"dictionary"`
-}
-
-func (m *ProfilesData) Reset()         { *m = ProfilesData{} }
-func (m *ProfilesData) String() string { return proto.CompactTextString(m) }
-func (*ProfilesData) ProtoMessage()    {}
-func (*ProfilesData) Descriptor() ([]byte, []int) {
-	return fileDescriptor_ddd0cf081a2fe76f, []int{1}
-}
-func (m *ProfilesData) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ProfilesData) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ProfilesData.Merge(m, src)
-}
-func (m *ProfilesData) XXX_Size() int {
-	return m.Size()
-}
-func (m *ProfilesData) XXX_DiscardUnknown() {
-	xxx_messageInfo_ProfilesData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProfilesData proto.InternalMessageInfo
-
-func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles {
-	if m != nil {
-		return m.ResourceProfiles
-	}
-	return nil
-}
-
-func (m *ProfilesData) GetDictionary() ProfilesDictionary {
-	if m != nil {
-		return m.Dictionary
-	}
-	return ProfilesDictionary{}
-}
-// A collection of ScopeProfiles from a Resource.
-type ResourceProfiles struct {
-	// The resource for the profiles in this message.
-	// If this field is not set then no resource info is known.
-	Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
-	// A list of ScopeProfiles that originate from a resource.
-	ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"`
-	// The Schema URL, if known. This is the identifier of the Schema that the resource data
-	// is recorded in. Notably, the last part of the URL path is the version number of the
-	// schema: http[s]://server[:port]/path/. To learn more about Schema URL see
-	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
-	// This schema_url applies to the data in the "resource" field. It does not apply
-	// to the data in the "scope_profiles" field which have their own schema_url field.
-	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceProfiles) Reset()         { *m = ResourceProfiles{} }
-func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) }
-func (*ResourceProfiles) ProtoMessage()    {}
-func (*ResourceProfiles) Descriptor() ([]byte, []int) {
-	return fileDescriptor_ddd0cf081a2fe76f, []int{2}
-}
-func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ResourceProfiles) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ResourceProfiles.Merge(m, src)
-}
-func (m *ResourceProfiles) XXX_Size() int {
-	return m.Size()
-}
-func (m *ResourceProfiles) XXX_DiscardUnknown() {
-	xxx_messageInfo_ResourceProfiles.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo
-
-func (m *ResourceProfiles) GetResource() v1.Resource {
-	if m != nil {
-		return m.Resource
-	}
-	return v1.Resource{}
-}
-
-func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles {
-	if m != nil {
-		return m.ScopeProfiles
-	}
-	return nil
-}
-
-func (m *ResourceProfiles) GetSchemaUrl() string {
-	if m != nil {
-		return m.SchemaUrl
-	}
-	return ""
-}
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} } -func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) } -func (*ScopeProfiles) ProtoMessage() {} -func (*ScopeProfiles) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{3} -} -func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ScopeProfiles) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScopeProfiles.Merge(m, src) -} -func (m *ScopeProfiles) XXX_Size() int { - return m.Size() -} -func (m *ScopeProfiles) XXX_DiscardUnknown() { - xxx_messageInfo_ScopeProfiles.DiscardUnknown(m) -} - -var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo - -func (m *ScopeProfiles) GetScope() v11.InstrumentationScope { - if m != nil { - return m.Scope - } - return v11.InstrumentationScope{} -} - -func (m *ScopeProfiles) GetProfiles() []*Profile { - if m != nil { - return m.Profiles - } - return nil -} - -func (m *ScopeProfiles) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// Represents a complete profile, including sample types, samples, mappings to -// binaries, stacks, locations, functions, string table, and additional -// metadata. It modifies and annotates pprof Profile with OpenTelemetry -// specific fields. -// -// Note that whilst fields in this message retain the name and field id from pprof in most cases -// for ease of understanding data migration, it is not intended that pprof:Profile and -// OpenTelemetry:Profile encoding be wire compatible. -type Profile struct { - // The type and unit of all Sample.values in this profile. - // For a cpu or off-cpu profile this might be: - // ["cpu","nanoseconds"] or ["off_cpu","nanoseconds"] - // For a heap profile, this might be: - // ["allocated_objects","count"] or ["allocated_space","bytes"], - SampleType ValueType `protobuf:"bytes,1,opt,name=sample_type,json=sampleType,proto3" json:"sample_type"` - // The set of samples recorded in this profile. - Sample []*Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample,omitempty"` - // Time of collection (UTC) represented as nanoseconds past the epoch. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // Duration of the profile, if a duration makes sense. - DurationNano uint64 `protobuf:"varint,4,opt,name=duration_nano,json=durationNano,proto3" json:"duration_nano,omitempty"` - // The kind of events between sampled occurrences. - // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] - PeriodType ValueType `protobuf:"bytes,5,opt,name=period_type,json=periodType,proto3" json:"period_type"` - // The number of events between sampled occurrences. - Period int64 `protobuf:"varint,6,opt,name=period,proto3" json:"period,omitempty"` - // Free-form text associated with the profile. The text is displayed as is - // to the user by the tools that read profiles (e.g. by pprof). This field - // should not be used to store any machine-readable information, it is only - // for human-friendly content. 
The profile must stay functional if this field - // is cleaned. - CommentStrindices []int32 `protobuf:"varint,7,rep,packed,name=comment_strindices,json=commentStrindices,proto3" json:"comment_strindices,omitempty"` - // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with - // all zeroes is considered invalid. It may be used for deduplication and signal - // correlation purposes. It is acceptable to treat two profiles with different values - // in this field as not equal, even if they represented the same object at an earlier - // time. - // This field is optional; an ID may be assigned to an ID-less profile in a later step. - ProfileId go_opentelemetry_io_collector_pdata_internal_data.ProfileID `protobuf:"bytes,8,opt,name=profile_id,json=profileId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.ProfileID" json:"profile_id"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,9,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] - OriginalPayloadFormat string `protobuf:"bytes,10,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"` - // Original payload can be stored in this field. This can be useful for users who want to get the original payload. - // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. - // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. - // If the original payload is in pprof format, it SHOULD not be included in this field. - // The field is optional, however if it is present then equivalent converted data should be populated in other fields - // of this message as far as is practicable. - OriginalPayload []byte `protobuf:"bytes,11,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"` - // References to attributes in attribute_table. 
[optional] - AttributeIndices []int32 `protobuf:"varint,12,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` -} - -func (m *Profile) Reset() { *m = Profile{} } -func (m *Profile) String() string { return proto.CompactTextString(m) } -func (*Profile) ProtoMessage() {} -func (*Profile) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{4} -} -func (m *Profile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Profile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Profile.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Profile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Profile.Merge(m, src) -} -func (m *Profile) XXX_Size() int { - return m.Size() -} -func (m *Profile) XXX_DiscardUnknown() { - xxx_messageInfo_Profile.DiscardUnknown(m) -} - -var xxx_messageInfo_Profile proto.InternalMessageInfo - -func (m *Profile) GetSampleType() ValueType { - if m != nil { - return m.SampleType - } - return ValueType{} -} - -func (m *Profile) GetSample() []*Sample { - if m != nil { - return m.Sample - } - return nil -} - -func (m *Profile) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *Profile) GetDurationNano() uint64 { - if m != nil { - return m.DurationNano - } - return 0 -} - -func (m *Profile) GetPeriodType() ValueType { - if m != nil { - return m.PeriodType - } - return ValueType{} -} - -func (m *Profile) GetPeriod() int64 { - if m != nil { - return m.Period - } - return 0 -} - -func (m *Profile) GetCommentStrindices() []int32 { - if m != nil { - return m.CommentStrindices - } - return nil -} - -func (m *Profile) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *Profile) GetOriginalPayloadFormat() string { - if m != nil { - return m.OriginalPayloadFormat - } - return "" -} - -func (m *Profile) GetOriginalPayload() []byte { - if m != nil { - return m.OriginalPayload - } - return nil -} - -func (m *Profile) GetAttributeIndices() []int32 { - if m != nil { - return m.AttributeIndices - } - return nil -} - -// A pointer from a profile Sample to a trace Span. -// Connects a profile sample to a trace span, identified by unique trace and span IDs. -type Link struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` - // A unique identifier for the linked span. The ID is an 8-byte array. 
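A Link is referenced from a Sample by index rather than embedded, like every other dictionary entry. A lookup sketch under the stated convention that link_table[0] is the 'null' link; the helper name and bounds handling are illustrative assumptions, not generated API:

func spanForSample(dict *ProfilesDictionary, s *Sample) (*Link, bool) {
	idx := s.GetLinkIndex()
	if idx <= 0 || int(idx) >= len(dict.GetLinkTable()) {
		return nil, false // 0 (or out of range) means "no linked span"
	}
	return dict.GetLinkTable()[idx], true
}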
-	SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
-}
-
-func (m *Link) Reset() { *m = Link{} }
-func (m *Link) String() string { return proto.CompactTextString(m) }
-func (*Link) ProtoMessage() {}
-func (*Link) Descriptor() ([]byte, []int) {
-	return fileDescriptor_ddd0cf081a2fe76f, []int{5}
-}
-func (m *Link) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Link.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Link) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Link.Merge(m, src)
-}
-func (m *Link) XXX_Size() int {
-	return m.Size()
-}
-func (m *Link) XXX_DiscardUnknown() {
-	xxx_messageInfo_Link.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Link proto.InternalMessageInfo
-
-// ValueType describes the type and units of a value, with an optional aggregation temporality.
-type ValueType struct {
-	TypeStrindex int32 `protobuf:"varint,1,opt,name=type_strindex,json=typeStrindex,proto3" json:"type_strindex,omitempty"`
-	UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"`
-	AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1development.AggregationTemporality" json:"aggregation_temporality,omitempty"`
-}
-
-func (m *ValueType) Reset() { *m = ValueType{} }
-func (m *ValueType) String() string { return proto.CompactTextString(m) }
-func (*ValueType) ProtoMessage() {}
-func (*ValueType) Descriptor() ([]byte, []int) {
-	return fileDescriptor_ddd0cf081a2fe76f, []int{6}
-}
-func (m *ValueType) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ValueType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ValueType.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ValueType) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValueType.Merge(m, src)
-}
-func (m *ValueType) XXX_Size() int {
-	return m.Size()
-}
-func (m *ValueType) XXX_DiscardUnknown() {
-	xxx_messageInfo_ValueType.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValueType proto.InternalMessageInfo
-
-func (m *ValueType) GetTypeStrindex() int32 {
-	if m != nil {
-		return m.TypeStrindex
-	}
-	return 0
-}
-
-func (m *ValueType) GetUnitStrindex() int32 {
-	if m != nil {
-		return m.UnitStrindex
-	}
-	return 0
-}
-
-func (m *ValueType) GetAggregationTemporality() AggregationTemporality {
-	if m != nil {
-		return m.AggregationTemporality
-	}
-	return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-// Each Sample records values encountered in some program context. The program
-// context is typically a stack trace, perhaps augmented with auxiliary
-// information like the thread-id, some indicator of a higher level request
-// being handled etc.
-//
-// A Sample MUST have at least one values or timestamps_unix_nano entry.
If -// both fields are populated, they MUST contain the same number of elements, and -// the elements at the same index MUST refer to the same event. -// -// Examples of different ways of representing a sample with the total value of 10: -// -// Report of a stacktrace at 10 timestamps (consumers must assume the value is 1 for each point): -// -// values: [] -// timestamps_unix_nano: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] -// -// Report of a stacktrace with an aggregated value without timestamps: -// -// values: [10] -// timestamps_unix_nano: [] -// -// Report of a stacktrace at 4 timestamps where each point records a specific value: -// -// values: [2, 2, 3, 3] -// timestamps_unix_nano: [1, 2, 3, 4] -type Sample struct { - // Reference to stack in ProfilesDictionary.stack_table. - StackIndex int32 `protobuf:"varint,1,opt,name=stack_index,json=stackIndex,proto3" json:"stack_index,omitempty"` - // The type and unit of each value is defined by Profile.sample_type. - Values []int64 `protobuf:"varint,2,rep,packed,name=values,proto3" json:"values,omitempty"` - // References to attributes in ProfilesDictionary.attribute_table. [optional] - AttributeIndices []int32 `protobuf:"varint,3,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` - // Reference to link in ProfilesDictionary.link_table. [optional] - // It can be unset / set to 0 if no link exists, as link_table[0] is always a 'null' default value. - LinkIndex int32 `protobuf:"varint,4,opt,name=link_index,json=linkIndex,proto3" json:"link_index,omitempty"` - // Timestamps associated with Sample represented in nanoseconds. These - // timestamps should fall within the Profile's time range. - TimestampsUnixNano []uint64 `protobuf:"fixed64,5,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"` -} - -func (m *Sample) Reset() { *m = Sample{} } -func (m *Sample) String() string { return proto.CompactTextString(m) } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{7} -} -func (m *Sample) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Sample.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Sample) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(m, src) -} -func (m *Sample) XXX_Size() int { - return m.Size() -} -func (m *Sample) XXX_DiscardUnknown() { - xxx_messageInfo_Sample.DiscardUnknown(m) -} - -var xxx_messageInfo_Sample proto.InternalMessageInfo - -func (m *Sample) GetStackIndex() int32 { - if m != nil { - return m.StackIndex - } - return 0 -} - -func (m *Sample) GetValues() []int64 { - if m != nil { - return m.Values - } - return nil -} - -func (m *Sample) GetAttributeIndices() []int32 { - if m != nil { - return m.AttributeIndices - } - return nil -} - -func (m *Sample) GetLinkIndex() int32 { - if m != nil { - return m.LinkIndex - } - return 0 -} - -func (m *Sample) GetTimestampsUnixNano() []uint64 { - if m != nil { - return m.TimestampsUnixNano - } - return nil -} - -// Describes the mapping of a binary in memory, including its address range, -// file offset, and metadata like build ID -type Mapping struct { - // Address at which the binary (or DLL) is loaded into memory. 
- MemoryStart uint64 `protobuf:"varint,1,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"` - // The limit of the address range occupied by this mapping. - MemoryLimit uint64 `protobuf:"varint,2,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"` - // Offset in the binary that corresponds to the first mapped address. - FileOffset uint64 `protobuf:"varint,3,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"` - // The object this entry is loaded from. This can be a filename on - // disk for the main binary and shared libraries, or virtual - // abstractions like "[vdso]". - FilenameStrindex int32 `protobuf:"varint,4,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"` - // References to attributes in ProfilesDictionary.attribute_table. [optional] - AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` -} - -func (m *Mapping) Reset() { *m = Mapping{} } -func (m *Mapping) String() string { return proto.CompactTextString(m) } -func (*Mapping) ProtoMessage() {} -func (*Mapping) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{8} -} -func (m *Mapping) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Mapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Mapping.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Mapping) XXX_Merge(src proto.Message) { - xxx_messageInfo_Mapping.Merge(m, src) -} -func (m *Mapping) XXX_Size() int { - return m.Size() -} -func (m *Mapping) XXX_DiscardUnknown() { - xxx_messageInfo_Mapping.DiscardUnknown(m) -} - -var xxx_messageInfo_Mapping proto.InternalMessageInfo - -func (m *Mapping) GetMemoryStart() uint64 { - if m != nil { - return m.MemoryStart - } - return 0 -} - -func (m *Mapping) GetMemoryLimit() uint64 { - if m != nil { - return m.MemoryLimit - } - return 0 -} - -func (m *Mapping) GetFileOffset() uint64 { - if m != nil { - return m.FileOffset - } - return 0 -} - -func (m *Mapping) GetFilenameStrindex() int32 { - if m != nil { - return m.FilenameStrindex - } - return 0 -} - -func (m *Mapping) GetAttributeIndices() []int32 { - if m != nil { - return m.AttributeIndices - } - return nil -} - -// A Stack represents a stack trace as a list of locations. -type Stack struct { - // References to locations in ProfilesDictionary.location_table. - // The first location is the leaf frame. 
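Resolving a Stack into frames is just a pair of table lookups; because of the leaf-first ordering noted above, the first element of location_indices is the innermost frame. A sketch (dict is assumed to be the payload's ProfilesDictionary, and indices are assumed in range):

func framesOf(dict *ProfilesDictionary, st *Stack) []*Location {
	frames := make([]*Location, 0, len(st.GetLocationIndices()))
	for _, idx := range st.GetLocationIndices() {
		// The first appended Location is the leaf; the last is the root caller.
		frames = append(frames, dict.GetLocationTable()[idx])
	}
	return frames
}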
- LocationIndices []int32 `protobuf:"varint,1,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"` -} - -func (m *Stack) Reset() { *m = Stack{} } -func (m *Stack) String() string { return proto.CompactTextString(m) } -func (*Stack) ProtoMessage() {} -func (*Stack) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{9} -} -func (m *Stack) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Stack) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Stack.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Stack) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stack.Merge(m, src) -} -func (m *Stack) XXX_Size() int { - return m.Size() -} -func (m *Stack) XXX_DiscardUnknown() { - xxx_messageInfo_Stack.DiscardUnknown(m) -} - -var xxx_messageInfo_Stack proto.InternalMessageInfo - -func (m *Stack) GetLocationIndices() []int32 { - if m != nil { - return m.LocationIndices - } - return nil -} - -// Describes function and line table debug information. -type Location struct { - // Reference to mapping in ProfilesDictionary.mapping_table. - // It can be unset / set to 0 if the mapping is unknown or not applicable for - // this profile type, as mapping_table[0] is always a 'null' default mapping. - MappingIndex int32 `protobuf:"varint,1,opt,name=mapping_index,json=mappingIndex,proto3" json:"mapping_index,omitempty"` - // The instruction address for this location, if available. It - // should be within [Mapping.memory_start...Mapping.memory_limit] - // for the corresponding mapping. A non-leaf address may be in the - // middle of a call instruction. It is up to display tools to find - // the beginning of the instruction if necessary. - Address uint64 `protobuf:"varint,2,opt,name=address,proto3" json:"address,omitempty"` - // Multiple line indicates this location has inlined functions, - // where the last entry represents the caller into which the - // preceding entries were inlined. - // - // E.g., if memcpy() is inlined into printf: - // line[0].function_name == "memcpy" - // line[1].function_name == "printf" - Line []*Line `protobuf:"bytes,3,rep,name=line,proto3" json:"line,omitempty"` - // References to attributes in ProfilesDictionary.attribute_table. 
[optional] - AttributeIndices []int32 `protobuf:"varint,4,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` -} - -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{10} -} -func (m *Location) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Location.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_Location.Merge(m, src) -} -func (m *Location) XXX_Size() int { - return m.Size() -} -func (m *Location) XXX_DiscardUnknown() { - xxx_messageInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_Location proto.InternalMessageInfo - -func (m *Location) GetMappingIndex() int32 { - if m != nil { - return m.MappingIndex - } - return 0 -} - -func (m *Location) GetAddress() uint64 { - if m != nil { - return m.Address - } - return 0 -} - -func (m *Location) GetLine() []*Line { - if m != nil { - return m.Line - } - return nil -} - -func (m *Location) GetAttributeIndices() []int32 { - if m != nil { - return m.AttributeIndices - } - return nil -} - -// Details a specific line in a source code, linked to a function. -type Line struct { - // Reference to function in ProfilesDictionary.function_table. - FunctionIndex int32 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"` - // Line number in source code. 0 means unset. - Line int64 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"` - // Column number in source code. 0 means unset. - Column int64 `protobuf:"varint,3,opt,name=column,proto3" json:"column,omitempty"` -} - -func (m *Line) Reset() { *m = Line{} } -func (m *Line) String() string { return proto.CompactTextString(m) } -func (*Line) ProtoMessage() {} -func (*Line) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{11} -} -func (m *Line) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Line) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Line.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Line) XXX_Merge(src proto.Message) { - xxx_messageInfo_Line.Merge(m, src) -} -func (m *Line) XXX_Size() int { - return m.Size() -} -func (m *Line) XXX_DiscardUnknown() { - xxx_messageInfo_Line.DiscardUnknown(m) -} - -var xxx_messageInfo_Line proto.InternalMessageInfo - -func (m *Line) GetFunctionIndex() int32 { - if m != nil { - return m.FunctionIndex - } - return 0 -} - -func (m *Line) GetLine() int64 { - if m != nil { - return m.Line - } - return 0 -} - -func (m *Line) GetColumn() int64 { - if m != nil { - return m.Column - } - return 0 -} - -// Describes a function, including its human-readable name, system name, -// source file, and starting line number in the source. -type Function struct { - // Function name. Empty string if not available. 
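All of the *Strindex fields below follow the same dictionary convention: they index ProfilesDictionary.string_table, whose entry 0 is by convention the empty string, so an unset index naturally reads back as "not available". An indicative lookup (assumed helper, no bounds checking):

func functionName(dict *ProfilesDictionary, f *Function) string {
	return dict.GetStringTable()[f.GetNameStrindex()]
}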
- NameStrindex int32 `protobuf:"varint,1,opt,name=name_strindex,json=nameStrindex,proto3" json:"name_strindex,omitempty"` - // Function name, as identified by the system. For instance, - // it can be a C++ mangled name. Empty string if not available. - SystemNameStrindex int32 `protobuf:"varint,2,opt,name=system_name_strindex,json=systemNameStrindex,proto3" json:"system_name_strindex,omitempty"` - // Source file containing the function. Empty string if not available. - FilenameStrindex int32 `protobuf:"varint,3,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"` - // Line number in source file. 0 means unset. - StartLine int64 `protobuf:"varint,4,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"` -} - -func (m *Function) Reset() { *m = Function{} } -func (m *Function) String() string { return proto.CompactTextString(m) } -func (*Function) ProtoMessage() {} -func (*Function) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{12} -} -func (m *Function) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Function) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Function.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Function) XXX_Merge(src proto.Message) { - xxx_messageInfo_Function.Merge(m, src) -} -func (m *Function) XXX_Size() int { - return m.Size() -} -func (m *Function) XXX_DiscardUnknown() { - xxx_messageInfo_Function.DiscardUnknown(m) -} - -var xxx_messageInfo_Function proto.InternalMessageInfo - -func (m *Function) GetNameStrindex() int32 { - if m != nil { - return m.NameStrindex - } - return 0 -} - -func (m *Function) GetSystemNameStrindex() int32 { - if m != nil { - return m.SystemNameStrindex - } - return 0 -} - -func (m *Function) GetFilenameStrindex() int32 { - if m != nil { - return m.FilenameStrindex - } - return 0 -} - -func (m *Function) GetStartLine() int64 { - if m != nil { - return m.StartLine - } - return 0 -} - -// A custom 'dictionary native' style of encoding attributes which is more convenient -// for profiles than opentelemetry.proto.common.v1.KeyValue -// Specifically, uses the string table for keys and allows optional unit information. 
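For instance (all values hypothetical), an attribute ("disk.io", 1024, unit "bytes") would intern both strings in the dictionary's string_table and store only their indices here, which keeps repeated attributes cheap on the wire:

var exampleKV = KeyValueAndUnit{
	KeyStrindex:  5, // string_table[5] == "disk.io", by assumption
	Value:        v11.AnyValue{Value: &v11.AnyValue_IntValue{IntValue: 1024}},
	UnitStrindex: 6, // string_table[6] == "bytes", by assumption
}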
-type KeyValueAndUnit struct { - KeyStrindex int32 `protobuf:"varint,1,opt,name=key_strindex,json=keyStrindex,proto3" json:"key_strindex,omitempty"` - Value v11.AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value"` - UnitStrindex int32 `protobuf:"varint,3,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"` -} - -func (m *KeyValueAndUnit) Reset() { *m = KeyValueAndUnit{} } -func (m *KeyValueAndUnit) String() string { return proto.CompactTextString(m) } -func (*KeyValueAndUnit) ProtoMessage() {} -func (*KeyValueAndUnit) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{13} -} -func (m *KeyValueAndUnit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyValueAndUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KeyValueAndUnit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KeyValueAndUnit) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValueAndUnit.Merge(m, src) -} -func (m *KeyValueAndUnit) XXX_Size() int { - return m.Size() -} -func (m *KeyValueAndUnit) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValueAndUnit.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValueAndUnit proto.InternalMessageInfo - -func (m *KeyValueAndUnit) GetKeyStrindex() int32 { - if m != nil { - return m.KeyStrindex - } - return 0 -} - -func (m *KeyValueAndUnit) GetValue() v11.AnyValue { - if m != nil { - return m.Value - } - return v11.AnyValue{} -} - -func (m *KeyValueAndUnit) GetUnitStrindex() int32 { - if m != nil { - return m.UnitStrindex - } - return 0 -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.profiles.v1development.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) - proto.RegisterType((*ProfilesDictionary)(nil), "opentelemetry.proto.profiles.v1development.ProfilesDictionary") - proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1development.ProfilesData") - proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1development.ResourceProfiles") - proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1development.ScopeProfiles") - proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1development.Profile") - proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1development.Link") - proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1development.ValueType") - proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1development.Sample") - proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1development.Mapping") - proto.RegisterType((*Stack)(nil), "opentelemetry.proto.profiles.v1development.Stack") - proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1development.Location") - proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1development.Line") - proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1development.Function") - proto.RegisterType((*KeyValueAndUnit)(nil), "opentelemetry.proto.profiles.v1development.KeyValueAndUnit") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/profiles/v1development/profiles.proto", fileDescriptor_ddd0cf081a2fe76f) -} - -var fileDescriptor_ddd0cf081a2fe76f = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xac, 0x97, 0x5f, 0x6f, 0x13, 0xc7, - 0x16, 0xc0, 0xb3, 0xf1, 0xbf, 0xf8, 0xd8, 0x4e, 0xcc, 0x88, 0x1b, 0x2c, 0x24, 0x82, 0x31, 0xf7, - 0x5e, 0x4c, 0xae, 0x48, 0x48, 0xb8, 0xad, 0x40, 0x54, 0x55, 0x9d, 0x38, 0xa0, 0x85, 0x90, 0xa4, - 0x13, 0x07, 0x95, 0x16, 0x69, 0x3b, 0xf1, 0x4e, 0xdc, 0x55, 0x76, 0x67, 0x57, 0xbb, 0xe3, 0x08, - 0xab, 0x5f, 0xa1, 0x0f, 0xfd, 0x04, 0xfd, 0x00, 0x95, 0xfa, 0x0d, 0x2a, 0xf5, 0x15, 0xf5, 0x89, - 0xf6, 0xa1, 0x42, 0x7d, 0xa0, 0x15, 0xbc, 0xd0, 0x6f, 0x51, 0xcd, 0x9f, 0x5d, 0xff, 0x89, 0x23, - 0xba, 0x55, 0x5f, 0xac, 0x9d, 0x33, 0x67, 0x7e, 0x67, 0xce, 0x99, 0x33, 0x67, 0x8e, 0xe1, 0x8e, - 0x1f, 0x50, 0xc6, 0xa9, 0x4b, 0x3d, 0xca, 0xc3, 0xc1, 0x6a, 0x10, 0xfa, 0xdc, 0x17, 0xbf, 0x47, - 0x8e, 0x4b, 0xa3, 0xd5, 0x93, 0x35, 0x9b, 0x9e, 0x50, 0xd7, 0x0f, 0x3c, 0xca, 0x78, 0x22, 0x5e, - 0x91, 0x5a, 0x68, 0x79, 0x6c, 0xa9, 0x12, 0xae, 0x24, 0x3a, 0x63, 0x4b, 0x2f, 0x9e, 0xef, 0xf9, - 0x3d, 0x5f, 0xc1, 0xc5, 0x97, 0x52, 0xbe, 0xb8, 0x3c, 0xcd, 0x78, 0xd7, 0xf7, 0x3c, 0x9f, 0xad, - 0x9e, 0xac, 0xe9, 0x2f, 0xad, 0xbb, 0x32, 0x4d, 0x37, 0xa4, 0x91, 0xdf, 0x0f, 0xbb, 0x54, 0x68, - 0xc7, 0xdf, 0x4a, 0xbf, 0xf1, 0x4b, 0x16, 0xd0, 0x9e, 0xde, 0x4c, 0xdb, 0xe9, 0x72, 0xc7, 0x67, - 0x24, 0x1c, 0xa0, 0x4f, 0xa0, 0xe2, 0x91, 0x20, 0x70, 0x58, 0xcf, 0xe2, 0xe4, 0xd0, 0xa5, 0x35, - 0xa3, 0x9e, 0x69, 0x96, 0xd6, 0x6f, 0xad, 0xfc, 0x75, 0x67, 0x56, 0x1e, 0x29, 0x00, 0x2e, 0x6b, - 0x52, 0x47, 0x80, 0xd0, 0x67, 0x30, 0xef, 0xfa, 0x5d, 0x22, 0x0c, 0x69, 0xf4, 0xac, 0x44, 0xff, - 0x3f, 0x0d, 0x7a, 0x5b, 0x13, 0x70, 0x25, 0x66, 0x25, 0xf0, 0xa3, 0x3e, 0xeb, 0x8e, 0xc0, 0x33, - 0xe9, 0xe1, 0xf7, 0x34, 0x01, 0x57, 0x62, 0x96, 0x82, 0xef, 0x02, 0xb8, 0x0e, 0x3b, 0xd6, 0xe0, - 0xac, 0x04, 0xdf, 0x4c, 0xb5, 0x6b, 0x87, 0x1d, 0xe3, 0xa2, 0x60, 0x28, 0xe0, 0x15, 0x28, 0x47, - 0x3c, 0x1c, 0xc6, 0x38, 0x57, 0xcf, 0x34, 0x8b, 0xb8, 0xa4, 0x64, 0x4a, 0xc5, 0x86, 0x05, 0xc2, - 0x79, 0xe8, 0x1c, 0xf6, 0x39, 0xd5, 0x5a, 0x79, 0x69, 0xf8, 0x6e, 0x1a, 0xc3, 0x0f, 0xe9, 0xe0, - 0x31, 0x71, 0xfb, 0xb4, 0xc5, 0xec, 0x03, 0xe6, 0x70, 0x3c, 0x9f, 0x30, 0x95, 0x15, 0x0c, 0xa5, - 0x88, 0x93, 0x6e, 0xec, 0x5a, 0x41, 0x5a, 0x58, 0x4b, 0x63, 0x61, 0x5f, 0x2c, 0xc7, 0x20, 0x29, - 0x92, 0xd9, 0xf8, 0xcd, 0x80, 0x72, 0x92, 0x58, 0x84, 0x13, 0xe4, 0xc0, 0xb9, 0x38, 0xf7, 0xac, - 0x98, 0xa2, 0xd3, 0xea, 0x83, 0x34, 0xa6, 0xb0, 0x86, 0xc4, 0x70, 0x5c, 0x0d, 0x27, 0x24, 0xc8, - 0x06, 0xb0, 0x93, 0x5c, 0xae, 0xcd, 0xd6, 0x8d, 0x66, 0x69, 0xfd, 0xc3, 0x34, 0x36, 0x4e, 0xdf, - 0x88, 0x8d, 0xec, 0xf3, 0x57, 0x97, 0x67, 0xf0, 0x08, 0xb7, 0xf1, 0xd6, 0x80, 0xea, 0xe4, 0x66, - 0xd0, 0x43, 0x98, 0x8b, 0xb7, 0x53, 0x33, 0xa4, 0xe1, 0xeb, 0x53, 0x0d, 0x27, 0xd7, 0xf0, 0x64, - 0x2d, 0xf1, 0x48, 0xdb, 0x48, 0x00, 0xe8, 0x73, 0x98, 0x8f, 0xba, 0x7e, 0x30, 0x12, 0x2f, 0x75, - 0x57, 0xee, 0xa4, 0x3a, 0x1a, 0x41, 0x48, 0x82, 0x55, 0x89, 0x46, 0x87, 0xe8, 0x12, 0x40, 0xd4, - 0xfd, 0x82, 0x7a, 0xc4, 0xea, 0x87, 0x6e, 0x2d, 0x53, 0x37, 0x9a, 0x45, 0x5c, 0x54, 0x92, 0x83, - 0xd0, 0x7d, 0x90, 0x9f, 0x7b, 0x5b, 0xa8, 0xfe, 0x51, 0x68, 0xbc, 0x30, 0xa0, 0x32, 0xc6, 0x41, - 0xbb, 0x90, 0x93, 0x24, 0xed, 0xe4, 0xf4, 0xc2, 0xa0, 0x2b, 0xd3, 0xc9, 0xda, 0x8a, 0xc9, 0x22, - 0x1e, 0xf6, 0xc5, 0x7e, 0xe4, 0x6d, 0x95, 0x2c, 0xed, 0xae, 0xe2, 0xa0, 0x5d, 0x98, 0x9b, 0xf0, - 0xf2, 0xd6, 0xdf, 0x38, 0x31, 0x9c, 0x40, 0xde, 0xe1, 0x5a, 0xe3, 0xa7, 0x1c, 0x14, 0xf4, 0x22, - 0xf4, 0x14, 0x4a, 0x11, 0xf1, 0x02, 0x97, 0x5a, 0x7c, 0x90, 0xb8, 0xf4, 0x5e, 0x1a, 0xf3, 0xf2, - 0x7a, 0x75, 0x06, 0x89, 0x53, 0xa0, 0x78, 0x42, 0x82, 0x1e, 0x40, 0x5e, 0x8d, 0xb4, 
0x5f, 0xeb, - 0xa9, 0x4e, 0x4f, 0xae, 0xc4, 0x9a, 0x80, 0xfe, 0x0d, 0xf3, 0xdc, 0xf1, 0xa8, 0xd5, 0x67, 0xce, - 0x33, 0x8b, 0x11, 0xe6, 0x4b, 0xc7, 0xf2, 0xb8, 0x2c, 0xa4, 0x07, 0xcc, 0x79, 0xb6, 0x43, 0x98, - 0x8f, 0xae, 0x42, 0xc5, 0xee, 0x87, 0xaa, 0xc6, 0x4a, 0xa5, 0x6c, 0xdd, 0x68, 0x66, 0x71, 0x39, - 0x16, 0x4a, 0xa5, 0xa7, 0x50, 0x0a, 0x68, 0xe8, 0xf8, 0xb6, 0x72, 0x3a, 0xf7, 0x0f, 0x38, 0xad, - 0x78, 0xd2, 0xe9, 0x45, 0xc8, 0xab, 0x51, 0x2d, 0x5f, 0x37, 0x9a, 0x19, 0xac, 0x47, 0xe8, 0x06, - 0x20, 0x91, 0x15, 0x94, 0x71, 0x4b, 0xd6, 0x39, 0xdb, 0xe9, 0xd2, 0x48, 0x56, 0x9c, 0x1c, 0x3e, - 0xa7, 0x67, 0xf6, 0x93, 0x09, 0x74, 0x08, 0xa0, 0x8d, 0x5b, 0x8e, 0x5d, 0x9b, 0xab, 0x1b, 0xcd, - 0xf2, 0xc6, 0xa6, 0x30, 0xf6, 0xeb, 0xab, 0xcb, 0x77, 0x7b, 0xfe, 0xc4, 0x6e, 0x1d, 0xf1, 0x2c, - 0xba, 0x2e, 0xed, 0x72, 0x3f, 0x5c, 0x0d, 0x6c, 0xc2, 0xc9, 0xaa, 0xc3, 0x38, 0x0d, 0x19, 0x71, - 0x57, 0xc5, 0x28, 0xce, 0x12, 0xb3, 0x8d, 0x8b, 0x1a, 0x6b, 0xda, 0xe8, 0x36, 0xd4, 0xec, 0xd0, - 0x0f, 0x02, 0x6a, 0x5b, 0x49, 0x5d, 0x8c, 0xac, 0xae, 0xdf, 0x67, 0xbc, 0x56, 0xac, 0x1b, 0xcd, - 0x0a, 0x5e, 0xd4, 0xf3, 0xad, 0x64, 0x7a, 0x53, 0xcc, 0xa2, 0xf7, 0xe1, 0x82, 0x1f, 0x3a, 0x3d, - 0x87, 0x11, 0xd7, 0x0a, 0xc8, 0xc0, 0xf5, 0x89, 0x6d, 0x1d, 0xf9, 0xa1, 0x47, 0x78, 0x0d, 0x64, - 0xbe, 0xfd, 0x2b, 0x9e, 0xde, 0x53, 0xb3, 0xf7, 0xe4, 0x24, 0xba, 0x0e, 0xd5, 0xc9, 0x75, 0xb5, - 0x92, 0xf0, 0x0d, 0x2f, 0x4c, 0x2c, 0x40, 0xff, 0x83, 0x73, 0xc3, 0x07, 0x20, 0x0e, 0x57, 0x59, - 0x86, 0xab, 0x9a, 0x4c, 0x98, 0x4a, 0xde, 0xf8, 0xc1, 0x80, 0xac, 0x78, 0x64, 0xd0, 0x53, 0x98, - 0xe3, 0x21, 0xe9, 0xca, 0xa0, 0x19, 0x32, 0x68, 0x2d, 0x1d, 0xb4, 0x3b, 0xe9, 0x83, 0xd6, 0x11, - 0x24, 0xb3, 0x8d, 0x0b, 0x12, 0x69, 0xda, 0xe8, 0x09, 0x14, 0xa2, 0x80, 0x30, 0x01, 0x9f, 0x95, - 0xf0, 0x8f, 0x34, 0xfc, 0x76, 0x7a, 0xf8, 0x7e, 0x40, 0x98, 0xd9, 0xc6, 0x79, 0x01, 0x34, 0xed, - 0xc6, 0xcf, 0x06, 0x14, 0x93, 0xb4, 0x12, 0x79, 0x2c, 0x72, 0x53, 0x67, 0x0a, 0x7d, 0x26, 0x7d, - 0xc9, 0xe1, 0xb2, 0x10, 0xee, 0x6b, 0x99, 0x50, 0xea, 0x33, 0x87, 0x0f, 0x95, 0x66, 0x95, 0x92, - 0x10, 0x26, 0x4a, 0x5f, 0xc2, 0x05, 0xd2, 0xeb, 0x85, 0xb4, 0xa7, 0x1b, 0x0f, 0xea, 0x05, 0x7e, - 0x48, 0x5c, 0x87, 0x0f, 0xe4, 0x05, 0x9a, 0x5f, 0xdf, 0x48, 0x93, 0xf8, 0xad, 0x21, 0xaa, 0x33, - 0x24, 0xe1, 0x45, 0x32, 0x55, 0x2e, 0x8e, 0x25, 0xaf, 0xee, 0x31, 0xba, 0x1c, 0xbf, 0xb4, 0xa3, - 0xfe, 0xa8, 0x67, 0xd3, 0x94, 0x1b, 0x5d, 0x84, 0xfc, 0x89, 0xf0, 0x5f, 0x15, 0xc1, 0x0c, 0xd6, - 0xa3, 0xe9, 0x79, 0x90, 0x99, 0x9e, 0x07, 0xa2, 0xf4, 0xc9, 0x4e, 0x45, 0x19, 0xc9, 0x4a, 0x23, - 0xb2, 0xef, 0x50, 0x36, 0x6e, 0xc2, 0x79, 0x51, 0x2e, 0x22, 0x4e, 0xbc, 0x20, 0x1a, 0x29, 0x25, - 0xa2, 0xff, 0xc8, 0x63, 0x34, 0x9c, 0x8b, 0x0b, 0x4a, 0xe3, 0x47, 0x03, 0x0a, 0xba, 0x9d, 0x13, - 0x5d, 0x8b, 0x47, 0x3d, 0x3f, 0x1c, 0x58, 0x11, 0x27, 0x21, 0x97, 0x3e, 0x64, 0x71, 0x49, 0xc9, - 0xf6, 0x85, 0x68, 0x44, 0xc5, 0x75, 0x3c, 0x87, 0xcb, 0x13, 0x49, 0x54, 0xb6, 0x85, 0x48, 0x04, - 0x42, 0xde, 0x6a, 0xff, 0xe8, 0x28, 0xa2, 0x5c, 0x1e, 0x42, 0x16, 0x83, 0x10, 0xed, 0x4a, 0x89, - 0x70, 0x58, 0x8c, 0x18, 0xf1, 0x46, 0xce, 0x5f, 0xb9, 0x52, 0x8d, 0x27, 0x92, 0xe3, 0x9d, 0x1a, - 0x9d, 0xdc, 0x19, 0xb7, 0x64, 0x1d, 0x72, 0xb2, 0x5d, 0x11, 0xd7, 0x30, 0x69, 0x45, 0xe3, 0x45, - 0x86, 0x5c, 0xb4, 0x10, 0xcb, 0xe3, 0x35, 0xdf, 0x1b, 0x30, 0x17, 0x37, 0x9d, 0x22, 0xe3, 0xe2, - 0xe6, 0x78, 0x2c, 0x2d, 0xb5, 0x50, 0x05, 0xb9, 0x06, 0x05, 0x62, 0xdb, 0x21, 0x8d, 0x22, 0xed, - 0x7e, 0x3c, 0x44, 0x6d, 0xc8, 0xba, 0x0e, 0x8b, 0x5b, 0xd3, 0xb4, 0x1d, 0x24, 0xc5, 0x72, 0xf5, - 0x74, 0x97, 
0xb3, 0x67, 0xb8, 0xfc, 0x44, 0xd6, 0x05, 0x8a, 0xfe, 0x33, 0xd2, 0x1f, 0x8f, 0x6e, - 0x3d, 0xe9, 0x74, 0xd5, 0xde, 0x91, 0xde, 0xe1, 0xac, 0x2c, 0xdd, 0xca, 0xde, 0x22, 0xe4, 0xbb, - 0xbe, 0xdb, 0xf7, 0x98, 0x3c, 0xab, 0x0c, 0xd6, 0xa3, 0xc6, 0x77, 0x06, 0xcc, 0xc5, 0x1d, 0xb3, - 0x88, 0xcc, 0xf8, 0x81, 0xe9, 0xc8, 0x8c, 0x1d, 0xd6, 0x4d, 0x38, 0x1f, 0x0d, 0x22, 0x4e, 0x3d, - 0x6b, 0x5c, 0x57, 0xdd, 0x5b, 0xa4, 0xe6, 0x76, 0x26, 0x8e, 0xf7, 0x74, 0x2e, 0x64, 0xce, 0xc8, - 0x05, 0xf1, 0xee, 0x8b, 0x2c, 0xb4, 0xa4, 0x0b, 0x59, 0xb9, 0xd9, 0xa2, 0x94, 0x88, 0x10, 0x34, - 0xbe, 0x31, 0x60, 0x61, 0xa2, 0x1f, 0x16, 0xf9, 0x7a, 0x4c, 0x07, 0x93, 0xbb, 0x2e, 0x1d, 0xd3, - 0x41, 0x42, 0xdd, 0x84, 0x9c, 0xbc, 0x89, 0xba, 0x9b, 0xbc, 0xf6, 0x8e, 0x7e, 0xa7, 0xc5, 0x94, - 0x85, 0xb8, 0xc7, 0x91, 0x6b, 0x4f, 0x97, 0xaa, 0xcc, 0xe9, 0x52, 0xb5, 0xfc, 0x95, 0x01, 0x8b, - 0xd3, 0x0b, 0x0c, 0xba, 0x06, 0x57, 0x5b, 0xf7, 0xef, 0xe3, 0xad, 0xfb, 0xad, 0x8e, 0xb9, 0xbb, - 0x63, 0x75, 0xb6, 0x1e, 0xed, 0xed, 0xe2, 0xd6, 0xb6, 0xd9, 0x79, 0x62, 0x1d, 0xec, 0xec, 0xef, - 0x6d, 0x6d, 0x9a, 0xf7, 0xcc, 0xad, 0x76, 0x75, 0x06, 0x5d, 0x81, 0x4b, 0x67, 0x29, 0xb6, 0xb7, - 0xb6, 0x3b, 0xad, 0xaa, 0x81, 0xfe, 0x0b, 0x8d, 0xb3, 0x54, 0x36, 0x0f, 0x1e, 0x1d, 0x6c, 0xb7, - 0x3a, 0xe6, 0xe3, 0xad, 0xea, 0xec, 0xc6, 0x4b, 0xe3, 0xf9, 0xeb, 0x25, 0xe3, 0xc5, 0xeb, 0x25, - 0xe3, 0xf7, 0xd7, 0x4b, 0xc6, 0xd7, 0x6f, 0x96, 0x66, 0x5e, 0xbc, 0x59, 0x9a, 0x79, 0xf9, 0x66, - 0x69, 0x06, 0x6e, 0x38, 0x7e, 0x8a, 0xe4, 0xdd, 0xa8, 0xc4, 0xcd, 0xe3, 0x9e, 0xd0, 0xda, 0x33, - 0x3e, 0xfd, 0x38, 0xf5, 0x73, 0xa1, 0xfe, 0xcf, 0xf6, 0x28, 0x3b, 0xe3, 0xbf, 0xf7, 0xb7, 0xb3, - 0xcb, 0xbb, 0x01, 0x65, 0x9d, 0x04, 0x28, 0x4d, 0x25, 0xed, 0xfc, 0xca, 0xe3, 0xb5, 0xf6, 0x50, - 0xf9, 0x30, 0x2f, 0x69, 0xb7, 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x93, 0x0f, 0x25, 0xda, 0xdd, - 0x0f, 0x00, 0x00, -} - -func (m *ProfilesDictionary) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProfilesDictionary) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProfilesDictionary) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.StackTable) > 0 { - for iNdEx := len(m.StackTable) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.StackTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.AttributeTable) > 0 { - for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.StringTable) > 0 { - for iNdEx := len(m.StringTable) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.StringTable[iNdEx]) - copy(dAtA[i:], m.StringTable[iNdEx]) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.StringTable[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.LinkTable) > 0 { - for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.FunctionTable) > 0 { - 
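		// Note the back-to-front convention used by every MarshalToSizedBuffer
		// in this file: i starts at len(dAtA), and each element is written
		// bytes-first, then its varint-encoded length, then the field key,
		// which is (field_number << 3) | wire_type. Here 0x1a is field 3
		// (function_table) with wire type 2 (length-delimited). Iterating the
		// slice in reverse keeps elements in forward order once decoded.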
for iNdEx := len(m.FunctionTable) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FunctionTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.LocationTable) > 0 { - for iNdEx := len(m.LocationTable) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.LocationTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.MappingTable) > 0 { - for iNdEx := len(m.MappingTable) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MappingTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ProfilesData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Dictionary.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.ResourceProfiles) > 0 { - for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.ScopeProfiles) > 0 { - for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScopeProfiles) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l 
int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.Profiles) > 0 { - for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Profile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Profile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AttributeIndices) > 0 { - dAtA5 := make([]byte, len(m.AttributeIndices)*10) - var j4 int - for _, num1 := range m.AttributeIndices { - num := uint64(num1) - for num >= 1<<7 { - dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j4++ - } - dAtA5[j4] = uint8(num) - j4++ - } - i -= j4 - copy(dAtA[i:], dAtA5[:j4]) - i = encodeVarintProfiles(dAtA, i, uint64(j4)) - i-- - dAtA[i] = 0x62 - } - if len(m.OriginalPayload) > 0 { - i -= len(m.OriginalPayload) - copy(dAtA[i:], m.OriginalPayload) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload))) - i-- - dAtA[i] = 0x5a - } - if len(m.OriginalPayloadFormat) > 0 { - i -= len(m.OriginalPayloadFormat) - copy(dAtA[i:], m.OriginalPayloadFormat) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat))) - i-- - dAtA[i] = 0x52 - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x48 - } - { - size := m.ProfileId.Size() - i -= size - if _, err := m.ProfileId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if len(m.CommentStrindices) > 0 { - dAtA7 := make([]byte, len(m.CommentStrindices)*10) - var j6 int - for _, num1 := range m.CommentStrindices { - num := uint64(num1) - for num >= 1<<7 { - dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j6++ - } - dAtA7[j6] = uint8(num) - j6++ - } - i -= j6 - copy(dAtA[i:], dAtA7[:j6]) - i = encodeVarintProfiles(dAtA, i, uint64(j6)) - i-- - dAtA[i] = 0x3a - } - if m.Period != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.Period)) - i-- - dAtA[i] = 0x30 - } - { - size, err := m.PeriodType.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if m.DurationNano != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.DurationNano)) - i-- - dAtA[i] = 0x20 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if len(m.Sample) > 0 { - for iNdEx := len(m.Sample) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Sample[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := 
m.SampleType.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Link) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Link) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ValueType) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValueType) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValueType) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AggregationTemporality != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x18 - } - if m.UnitStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex)) - i-- - dAtA[i] = 0x10 - } - if m.TypeStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.TypeStrindex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Sample) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.TimestampsUnixNano) > 0 { - for iNdEx := len(m.TimestampsUnixNano) - 1; iNdEx >= 0; iNdEx-- { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimestampsUnixNano[iNdEx])) - } - i = encodeVarintProfiles(dAtA, i, uint64(len(m.TimestampsUnixNano)*8)) - i-- - dAtA[i] = 0x2a - } - if m.LinkIndex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.LinkIndex)) - i-- - dAtA[i] = 0x20 - } - if len(m.AttributeIndices) > 0 { - dAtA11 := make([]byte, len(m.AttributeIndices)*10) - var j10 int - for _, num1 := range m.AttributeIndices { - num := uint64(num1) - for num >= 1<<7 { - dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j10++ - } - dAtA11[j10] = uint8(num) - j10++ - } - i -= j10 - copy(dAtA[i:], dAtA11[:j10]) - i = encodeVarintProfiles(dAtA, i, uint64(j10)) - i-- - dAtA[i] = 0x1a - } - if len(m.Values) > 0 { - dAtA13 := make([]byte, len(m.Values)*10) - var j12 int - for _, num1 := range m.Values { - num := uint64(num1) - for num >= 1<<7 { - dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j12++ - } - dAtA13[j12] = uint8(num) - j12++ - } - i -= j12 - copy(dAtA[i:], dAtA13[:j12]) - i = 
encodeVarintProfiles(dAtA, i, uint64(j12)) - i-- - dAtA[i] = 0x12 - } - if m.StackIndex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.StackIndex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Mapping) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Mapping) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AttributeIndices) > 0 { - dAtA15 := make([]byte, len(m.AttributeIndices)*10) - var j14 int - for _, num1 := range m.AttributeIndices { - num := uint64(num1) - for num >= 1<<7 { - dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j14++ - } - dAtA15[j14] = uint8(num) - j14++ - } - i -= j14 - copy(dAtA[i:], dAtA15[:j14]) - i = encodeVarintProfiles(dAtA, i, uint64(j14)) - i-- - dAtA[i] = 0x2a - } - if m.FilenameStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex)) - i-- - dAtA[i] = 0x20 - } - if m.FileOffset != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.FileOffset)) - i-- - dAtA[i] = 0x18 - } - if m.MemoryLimit != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryLimit)) - i-- - dAtA[i] = 0x10 - } - if m.MemoryStart != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryStart)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Stack) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Stack) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stack) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LocationIndices) > 0 { - dAtA17 := make([]byte, len(m.LocationIndices)*10) - var j16 int - for _, num1 := range m.LocationIndices { - num := uint64(num1) - for num >= 1<<7 { - dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j16++ - } - dAtA17[j16] = uint8(num) - j16++ - } - i -= j16 - copy(dAtA[i:], dAtA17[:j16]) - i = encodeVarintProfiles(dAtA, i, uint64(j16)) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Location) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Location) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AttributeIndices) > 0 { - dAtA19 := make([]byte, len(m.AttributeIndices)*10) - var j18 int - for _, num1 := range m.AttributeIndices { - num := uint64(num1) - for num >= 1<<7 { - dAtA19[j18] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j18++ - } - dAtA19[j18] = uint8(num) - j18++ - } - i -= j18 - copy(dAtA[i:], dAtA19[:j18]) - i = encodeVarintProfiles(dAtA, i, uint64(j18)) - i-- - dAtA[i] = 0x22 - } - if len(m.Line) > 0 { - for iNdEx := len(m.Line) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Line[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Address != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.Address)) - i-- - dAtA[i] = 0x10 - } - if m.MappingIndex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.MappingIndex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Line) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Line) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Line) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Column != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.Column)) - i-- - dAtA[i] = 0x18 - } - if m.Line != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.Line)) - i-- - dAtA[i] = 0x10 - } - if m.FunctionIndex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.FunctionIndex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Function) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Function) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Function) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StartLine != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.StartLine)) - i-- - dAtA[i] = 0x20 - } - if m.FilenameStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex)) - i-- - dAtA[i] = 0x18 - } - if m.SystemNameStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.SystemNameStrindex)) - i-- - dAtA[i] = 0x10 - } - if m.NameStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.NameStrindex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *KeyValueAndUnit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValueAndUnit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyValueAndUnit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.UnitStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex)) - i-- - dAtA[i] = 0x18 - } - { - size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if m.KeyStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.KeyStrindex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int { - offset -= sovProfiles(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ProfilesDictionary) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.MappingTable) > 0 { - for _, e := range m.MappingTable { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if len(m.LocationTable) > 0 { - for _, e := range m.LocationTable { - l = 
e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if len(m.FunctionTable) > 0 { - for _, e := range m.FunctionTable { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if len(m.LinkTable) > 0 { - for _, e := range m.LinkTable { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if len(m.StringTable) > 0 { - for _, s := range m.StringTable { - l = len(s) - n += 1 + l + sovProfiles(uint64(l)) - } - } - if len(m.AttributeTable) > 0 { - for _, e := range m.AttributeTable { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if len(m.StackTable) > 0 { - for _, e := range m.StackTable { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - return n -} - -func (m *ProfilesData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceProfiles) > 0 { - for _, e := range m.ResourceProfiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - l = m.Dictionary.Size() - n += 1 + l + sovProfiles(uint64(l)) - return n -} - -func (m *ResourceProfiles) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovProfiles(uint64(l)) - if len(m.ScopeProfiles) > 0 { - for _, e := range m.ScopeProfiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - return n -} - -func (m *ScopeProfiles) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Scope.Size() - n += 1 + l + sovProfiles(uint64(l)) - if len(m.Profiles) > 0 { - for _, e := range m.Profiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - return n -} - -func (m *Profile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.SampleType.Size() - n += 1 + l + sovProfiles(uint64(l)) - if len(m.Sample) > 0 { - for _, e := range m.Sample { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.DurationNano != 0 { - n += 1 + sovProfiles(uint64(m.DurationNano)) - } - l = m.PeriodType.Size() - n += 1 + l + sovProfiles(uint64(l)) - if m.Period != 0 { - n += 1 + sovProfiles(uint64(m.Period)) - } - if len(m.CommentStrindices) > 0 { - l = 0 - for _, e := range m.CommentStrindices { - l += sovProfiles(uint64(e)) - } - n += 1 + sovProfiles(uint64(l)) + l - } - l = m.ProfileId.Size() - n += 1 + l + sovProfiles(uint64(l)) - if m.DroppedAttributesCount != 0 { - n += 1 + sovProfiles(uint64(m.DroppedAttributesCount)) - } - l = len(m.OriginalPayloadFormat) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - l = len(m.OriginalPayload) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - if len(m.AttributeIndices) > 0 { - l = 0 - for _, e := range m.AttributeIndices { - l += sovProfiles(uint64(e)) - } - n += 1 + sovProfiles(uint64(l)) + l - } - return n -} - -func (m *Link) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.TraceId.Size() - n += 1 + l + sovProfiles(uint64(l)) - l = m.SpanId.Size() - n += 1 + l + sovProfiles(uint64(l)) - return n -} - -func (m *ValueType) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TypeStrindex != 0 { - n += 1 + sovProfiles(uint64(m.TypeStrindex)) - } - if m.UnitStrindex != 0 { - n += 1 + sovProfiles(uint64(m.UnitStrindex)) - } - if m.AggregationTemporality != 0 { - n += 1 + sovProfiles(uint64(m.AggregationTemporality)) - } - return n -} - -func (m *Sample) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StackIndex != 0 { - n += 1 + sovProfiles(uint64(m.StackIndex)) - } - if len(m.Values) > 0 { - l = 0 - for _, e := range m.Values { - l += sovProfiles(uint64(e)) - } - n += 1 + sovProfiles(uint64(l)) + l - } - if len(m.AttributeIndices) > 0 { - l = 0 - for _, e := range m.AttributeIndices { - l += sovProfiles(uint64(e)) - } - n += 1 + sovProfiles(uint64(l)) + l - } - if m.LinkIndex != 0 { - n += 1 + sovProfiles(uint64(m.LinkIndex)) - } - if len(m.TimestampsUnixNano) > 0 { - n += 1 + sovProfiles(uint64(len(m.TimestampsUnixNano)*8)) + len(m.TimestampsUnixNano)*8 - } - return n -} - -func (m *Mapping) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MemoryStart != 0 { - n += 1 + sovProfiles(uint64(m.MemoryStart)) - } - if m.MemoryLimit != 0 { - n += 1 + sovProfiles(uint64(m.MemoryLimit)) - } - if m.FileOffset != 0 { - n += 1 + sovProfiles(uint64(m.FileOffset)) - } - if m.FilenameStrindex != 0 { - n += 1 + sovProfiles(uint64(m.FilenameStrindex)) - } - if len(m.AttributeIndices) > 0 { - l = 0 - for _, e := range m.AttributeIndices { - l += sovProfiles(uint64(e)) - } - n += 1 + sovProfiles(uint64(l)) + l - } - return n -} - -func (m *Stack) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LocationIndices) > 0 { - l = 0 - for _, e := range m.LocationIndices { - l += sovProfiles(uint64(e)) - } - n += 1 + sovProfiles(uint64(l)) + l - } - return n -} - -func (m *Location) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MappingIndex != 0 { - n += 1 + sovProfiles(uint64(m.MappingIndex)) - } - if m.Address != 0 { - n += 1 + sovProfiles(uint64(m.Address)) - } - if len(m.Line) > 0 { - for _, e := range m.Line { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if len(m.AttributeIndices) > 0 { - l = 0 - for _, e := range m.AttributeIndices { - l += sovProfiles(uint64(e)) - } - n += 1 + sovProfiles(uint64(l)) + l - } - return n -} - -func (m *Line) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FunctionIndex != 0 { - n += 1 + sovProfiles(uint64(m.FunctionIndex)) - } - if m.Line != 0 { - n += 1 + sovProfiles(uint64(m.Line)) - } - if m.Column != 0 { - n += 1 + sovProfiles(uint64(m.Column)) - } - return n -} - -func (m *Function) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NameStrindex != 0 { - n += 1 + sovProfiles(uint64(m.NameStrindex)) - } - if m.SystemNameStrindex != 0 { - n += 1 + sovProfiles(uint64(m.SystemNameStrindex)) - } - if m.FilenameStrindex != 0 { - n += 1 + sovProfiles(uint64(m.FilenameStrindex)) - } - if m.StartLine != 0 { - n += 1 + sovProfiles(uint64(m.StartLine)) - } - return n -} - -func (m *KeyValueAndUnit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.KeyStrindex != 0 { - n += 1 + sovProfiles(uint64(m.KeyStrindex)) - } - l = m.Value.Size() - n += 1 + l + sovProfiles(uint64(l)) - if m.UnitStrindex != 0 { - n += 1 + sovProfiles(uint64(m.UnitStrindex)) - } - return n -} - -func sovProfiles(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProfiles(x uint64) (n int) { - return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ProfilesDictionary) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return 
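// The sovProfiles and encodeVarintProfiles helpers defined above are the
// core of this file's wire format: sovProfiles computes a varint's width
// as (bits.Len64(x|1)+6)/7, i.e. ceil(bitlen/7) with a one-byte minimum,
// and the Marshal code writes those varints into a pre-sized buffer.
// A minimal standalone sketch of the same scheme, for orientation only;
// varintLen and appendVarint are hypothetical names, not part of the
// vendored file:
package sketch

import "math/bits"

// varintLen mirrors sovProfiles: one byte per started group of 7 bits.
func varintLen(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// appendVarint emits x low 7 bits first, setting the continuation bit
// (0x80) on every byte except the last, as the generated encoder does.
func appendVarint(dst []byte, x uint64) []byte {
	for x >= 1<<7 {
		dst = append(dst, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(dst, byte(x))
}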
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProfilesDictionary: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProfilesDictionary: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MappingTable = append(m.MappingTable, &Mapping{}) - if err := m.MappingTable[len(m.MappingTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LocationTable = append(m.LocationTable, &Location{}) - if err := m.LocationTable[len(m.LocationTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FunctionTable = append(m.FunctionTable, &Function{}) - if err := m.FunctionTable[len(m.FunctionTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LinkTable = append(m.LinkTable, &Link{}) - if err := m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field StringTable", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AttributeTable = append(m.AttributeTable, &KeyValueAndUnit{}) - if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StackTable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StackTable = append(m.StackTable, &Stack{}) - if err := m.StackTable[len(m.StackTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProfilesData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - 
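// Every decode switch in this file ends with a default branch that calls
// skipProfiles, so fields added in newer schema versions are stepped over
// instead of rejected. A condensed sketch of that skipping logic for the
// four non-group wire types (the vendored skipProfiles additionally
// tracks group nesting via wire types 3 and 4; skipField is a
// hypothetical name, not part of the vendored file):
package sketch

import (
	"errors"
	"fmt"
)

func skipField(buf []byte, i, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: advance until a byte without the continuation bit
		for i < len(buf) {
			i++
			if buf[i-1] < 0x80 {
				return i, nil
			}
		}
		return 0, errors.New("truncated varint")
	case 1: // fixed64
		return i + 8, nil
	case 2: // length-delimited: decode the length varint, skip the payload
		n, shift := 0, uint(0)
		for ; i < len(buf); i++ {
			n |= int(buf[i]&0x7f) << shift
			shift += 7
			if buf[i] < 0x80 {
				return i + 1 + n, nil
			}
		}
		return 0, errors.New("truncated length")
	case 5: // fixed32
		return i + 4, nil
	default:
		return 0, fmt.Errorf("illegal wire type %d", wireType)
	}
}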
break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{}) - if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Dictionary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceProfiles) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{}) - if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScopeProfiles) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Profiles = append(m.Profiles, &Profile{}) - if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
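// The message-typed cases above all share one shape: decode a varint
// length, bound-check postIndex against the buffer, then hand the
// sub-slice to the element's own Unmarshal. A sketch of that framing
// with a hypothetical child type (not the vendored API):
package sketch

import "errors"

type child struct{ raw []byte }

func (c *child) unmarshal(b []byte) error { c.raw = b; return nil }

// decodeNested consumes one length-delimited field body starting at i
// and returns the index just past it, mirroring the postIndex checks.
func decodeNested(buf []byte, i int, out *[]*child) (int, error) {
	msglen, shift, ok := 0, uint(0), false
	for ; i < len(buf); i++ {
		msglen |= int(buf[i]&0x7f) << shift
		shift += 7
		if buf[i] < 0x80 {
			i++
			ok = true
			break
		}
	}
	post := i + msglen
	if !ok || msglen < 0 || post < 0 || post > len(buf) {
		return 0, errors.New("invalid length")
	}
	c := &child{}
	if err := c.unmarshal(buf[i:post]); err != nil {
		return 0, err
	}
	*out = append(*out, c)
	return post, nil
}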
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Profile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Profile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SampleType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sample = append(m.Sample, &Sample{}) - if err := m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationNano", wireType) - } - m.DurationNano = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationNano |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) - } - m.Period = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Period |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType == 0 { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CommentStrindices = append(m.CommentStrindices, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.CommentStrindices) == 0 { - m.CommentStrindices = make([]int32, 0, elementCount) - } - for iNdEx < postIndex { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CommentStrindices = append(m.CommentStrindices, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType) - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ProfileId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - 
if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OriginalPayload = append(m.OriginalPayload[:0], dAtA[iNdEx:postIndex]...) - if m.OriginalPayload == nil { - m.OriginalPayload = []byte{} - } - iNdEx = postIndex - case 12: - if wireType == 0 { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AttributeIndices = append(m.AttributeIndices, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.AttributeIndices) == 0 { - m.AttributeIndices = make([]int32, 0, elementCount) - } - for iNdEx < postIndex { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AttributeIndices = append(m.AttributeIndices, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Link) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - 
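// Each Unmarshal loop above begins by varint-decoding a tag and
// splitting it as fieldNum = tag>>3, wireType = tag&0x7, rejecting a
// stray end-group marker (wire type 4) at the top level. A tiny
// illustration (splitTag is a hypothetical name):
package sketch

import "fmt"

func splitTag(tag uint64) (fieldNum int32, wireType int, err error) {
	fieldNum = int32(tag >> 3)
	wireType = int(tag & 0x7)
	switch {
	case wireType == 4:
		err = fmt.Errorf("unexpected end-group tag")
	case fieldNum <= 0:
		err = fmt.Errorf("illegal field number %d", fieldNum)
	}
	return fieldNum, wireType, err
}

// For example, the 0x12 bytes written by the marshal functions earlier
// split as field 2, wire type 2 (length-delimited).
var _ = func() bool { f, w, _ := splitTag(0x12); return f == 2 && w == 2 }()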
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Link: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValueType) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValueType: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValueType: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType) - } - m.TypeStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TypeStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) - } - m.UnitStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnitStrindex |= int32(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - m.AggregationTemporality = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sample) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sample: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StackIndex", wireType) - } - m.StackIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StackIndex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Values = append(m.Values, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Values) == 0 { - m.Values = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Values = append(m.Values, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - case 3: - if wireType == 0 { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b 
< 0x80 { - break - } - } - m.AttributeIndices = append(m.AttributeIndices, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.AttributeIndices) == 0 { - m.AttributeIndices = make([]int32, 0, elementCount) - } - for iNdEx < postIndex { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AttributeIndices = append(m.AttributeIndices, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType) - } - m.LinkIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LinkIndex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.TimestampsUnixNano = append(m.TimestampsUnixNano, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.TimestampsUnixNano) == 0 { - m.TimestampsUnixNano = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.TimestampsUnixNano = append(m.TimestampsUnixNano, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Mapping) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
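// The TimestampsUnixNano case above accepts both a lone fixed64 (wire
// type 1) and a packed run (wire type 2), pre-sizing the slice as
// packedLen/8 and reading little-endian 8-byte chunks. A sketch of the
// packed path (slightly stricter than the generated code, which simply
// stops reading at postIndex):
package sketch

import (
	"encoding/binary"
	"errors"
)

func decodePackedFixed64(payload []byte) ([]uint64, error) {
	if len(payload)%8 != 0 {
		return nil, errors.New("packed fixed64 payload not a multiple of 8 bytes")
	}
	out := make([]uint64, 0, len(payload)/8)
	for i := 0; i+8 <= len(payload); i += 8 {
		out = append(out, binary.LittleEndian.Uint64(payload[i:]))
	}
	return out, nil
}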
wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Mapping: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Mapping: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType) - } - m.MemoryStart = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryStart |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType) - } - m.MemoryLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType) - } - m.FileOffset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FileOffset |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType) - } - m.FilenameStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FilenameStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType == 0 { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AttributeIndices = append(m.AttributeIndices, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.AttributeIndices) == 0 { - m.AttributeIndices = make([]int32, 0, elementCount) - } - for iNdEx < postIndex { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AttributeIndices = append(m.AttributeIndices, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
(skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Stack) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Stack: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Stack: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType == 0 { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocationIndices = append(m.LocationIndices, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.LocationIndices) == 0 { - m.LocationIndices = make([]int32, 0, elementCount) - } - for iNdEx < postIndex { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocationIndices = append(m.LocationIndices, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Location) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Location: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType) - } - 
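// The repeated int32 index fields in this file (AttributeIndices,
// LocationIndices, ...) accept either one varint per tag (wire type 0)
// or a packed payload (wire type 2). The generated code pre-counts
// elements by counting bytes with the continuation bit clear before
// allocating; a sketch of that two-pass decode for well-formed input:
package sketch

func decodePackedVarint32(payload []byte) []int32 {
	count := 0
	for _, b := range payload {
		if b < 0x80 { // final byte of one varint
			count++
		}
	}
	out := make([]int32, 0, count)
	var v int32
	shift := uint(0)
	for _, b := range payload {
		v |= int32(b&0x7f) << shift
		shift += 7
		if b < 0x80 {
			out = append(out, v)
			v, shift = 0, 0
		}
	}
	return out
}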
m.MappingIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MappingIndex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - m.Address = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Address |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Line = append(m.Line, &Line{}) - if err := m.Line[len(m.Line)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType == 0 { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AttributeIndices = append(m.AttributeIndices, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.AttributeIndices) == 0 { - m.AttributeIndices = make([]int32, 0, elementCount) - } - for iNdEx < postIndex { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AttributeIndices = append(m.AttributeIndices, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Line) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Line: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Line: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType) - } - m.FunctionIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FunctionIndex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) - } - m.Line = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Line |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType) - } - m.Column = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Column |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Function) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Function: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType) - } - m.NameStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NameStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType) - } - m.SystemNameStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SystemNameStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType) - } - m.FilenameStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FilenameStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType) - } - m.StartLine = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartLine |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KeyValueAndUnit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValueAndUnit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValueAndUnit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType) - } - m.KeyStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeyStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) - } - m.UnitStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnitStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProfiles(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles 
- } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProfiles - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProfiles - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProfiles - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go deleted file mode 100644 index ce55b3c3496..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go +++ /dev/null @@ -1,460 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/resource/v1/resource.proto - -package v1 - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Resource information. -type Resource struct { - // Set of attributes that describe the resource. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. 
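Editor's note, outside the diff: the generated skip helpers above (skipProfiles here, and the analogous skipResource/skipTrace helpers in the other deleted files) decode protobuf base-128 varints with an inline shift loop. As an illustration only — a standalone sketch, not part of any vendored file — the same decode logic in isolation:

	package main

	import (
		"errors"
		"fmt"
	)

	// decodeVarint mirrors the inline loops in the generated code: each byte
	// contributes 7 payload bits (low to high), and a set high bit means
	// another byte follows. The shift cap guards against uint64 overflow,
	// matching the ErrIntOverflow checks above.
	func decodeVarint(b []byte) (v uint64, n int, err error) {
		for shift := uint(0); shift < 64; shift += 7 {
			if n >= len(b) {
				return 0, 0, errors.New("unexpected EOF")
			}
			c := b[n]
			n++
			v |= uint64(c&0x7F) << shift
			if c < 0x80 {
				return v, n, nil
			}
		}
		return 0, 0, errors.New("integer overflow")
	}

	func main() {
		// 0xAC 0x02 encodes 300: 0x2C | (0x02 << 7).
		v, n, _ := decodeVarint([]byte{0xAC, 0x02})
		fmt.Println(v, n) // 300 2
	}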
- Attributes []v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then - // no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Set of entities that participate in this Resource. - // - // Note: keys in the references MUST exist in attributes of this message. - // - // Status: [Development] - EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_446f73eacf88f3f5, []int{0} -} -func (m *Resource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return m.Size() -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var xxx_messageInfo_Resource proto.InternalMessageInfo - -func (m *Resource) GetAttributes() []v1.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Resource) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *Resource) GetEntityRefs() []*v1.EntityRef { - if m != nil { - return m.EntityRefs - } - return nil -} - -func init() { - proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5) -} - -var fileDescriptor_446f73eacf88f3f5 = []byte{ - // 334 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xc1, 0x6a, 0xfa, 0x40, - 0x10, 0xc6, 0xb3, 0xfa, 0xe7, 0x4f, 0x59, 0xf1, 0x12, 0x4a, 0x09, 0x1e, 0xa2, 0x78, 0xa9, 0xf4, - 0xb0, 0x21, 0xed, 0xa5, 0xd7, 0x5a, 0x5a, 0x28, 0xa5, 0x54, 0x42, 0xf1, 0xd0, 0x8b, 0xc4, 0x38, - 0x86, 0x40, 0xdc, 0x09, 0x9b, 0x89, 0xe0, 0x5b, 0xf4, 0x39, 0xfa, 0x02, 0x7d, 0x05, 0x8f, 0x1e, - 0x7b, 0x92, 0xa2, 0x2f, 0x52, 0xb2, 0x31, 0xa9, 0x2d, 0x82, 0xb7, 0x6f, 0xe7, 0xfb, 0xe6, 0x37, - 0xc3, 0x2c, 0x17, 0x98, 0x80, 0x24, 0x88, 0x61, 0x06, 0xa4, 0x16, 0x4e, 0xa2, 0x90, 0xd0, 0x51, - 0x90, 0x62, 0xa6, 0x02, 0x70, 0xe6, 0x6e, 0xa5, 0x85, 0xb6, 0xcc, 0xf6, 0xaf, 0x7c, 0x51, 0x14, - 0x55, 0x66, 0xee, 0xb6, 0x4e, 0x43, 0x0c, 0xb1, 0xc0, 0xe4, 0xaa, 0x48, 0xb4, 0x2e, 0x0e, 0x8d, - 0x09, 0x70, 0x36, 0x43, 0x99, 0x0f, 0x29, 0x54, 0x91, 0xed, 0xae, 0x19, 0x3f, 0xf1, 0x76, 0x44, - 0xf3, 0x89, 0x73, 0x9f, 0x48, 0x45, 0xe3, 0x8c, 0x20, 0xb5, 0x58, 0xa7, 0xde, 0x6b, 0x5c, 0x9e, - 0x8b, 0x43, 0x4b, 0xec, 0x18, 0x73, 0x57, 0x3c, 0xc2, 0x62, 0xe8, 0xc7, 0x19, 0xf4, 0xff, 0x2d, - 0xd7, 0x6d, 0xc3, 0xdb, 0x03, 0x98, 0xd7, 0xdc, 0x9a, 0x28, 0x4c, 0x12, 0x98, 0x8c, 0x7e, 0xaa, - 0xa3, 0x00, 0x33, 0x49, 0x56, 0xad, 0xc3, 0x7a, 0x4d, 0xef, 0x6c, 0xe7, 0xdf, 
0x54, 0xf6, 0x6d, - 0xee, 0x9a, 0x0f, 0xbc, 0x01, 0x92, 0x22, 0x5a, 0x8c, 0x14, 0x4c, 0x53, 0xab, 0xae, 0x37, 0xe9, - 0x1d, 0xd9, 0xe4, 0x4e, 0x77, 0x78, 0x30, 0xf5, 0x38, 0x94, 0x32, 0xed, 0x7f, 0xb0, 0xe5, 0xc6, - 0x66, 0xab, 0x8d, 0xcd, 0xbe, 0x36, 0x36, 0x7b, 0xdb, 0xda, 0xc6, 0x6a, 0x6b, 0x1b, 0x9f, 0x5b, - 0xdb, 0xe0, 0xdd, 0x08, 0xc5, 0x91, 0x0b, 0xf7, 0x9b, 0xe5, 0x71, 0x06, 0xb9, 0x35, 0x60, 0xaf, - 0xf7, 0xe1, 0xdf, 0xa6, 0x28, 0x3f, 0x6e, 0x1c, 0x43, 0x40, 0xa8, 0x9c, 0x64, 0xe2, 0x93, 0xef, - 0x44, 0x92, 0x40, 0x49, 0x3f, 0x76, 0xf4, 0x4b, 0x53, 0x43, 0x90, 0xfb, 0x5f, 0xfd, 0x5e, 0x6b, - 0x3f, 0x27, 0x20, 0x5f, 0x2a, 0x8a, 0xe6, 0x8b, 0x72, 0x9a, 0x18, 0xba, 0xe3, 0xff, 0xba, 0xef, - 0xea, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x10, 0xa9, 0xec, 0x36, 0x02, 0x00, 0x00, -} - -func (m *Resource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Resource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.EntityRefs) > 0 { - for iNdEx := len(m.EntityRefs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EntityRefs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintResource(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x10 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintResource(dAtA []byte, offset int, v uint64) int { - offset -= sovResource(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Resource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovResource(uint64(m.DroppedAttributesCount)) - } - if len(m.EntityRefs) > 0 { - for _, e := range m.EntityRefs { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - return n -} - -func sovResource(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResource(x uint64) (n int) { - return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Resource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Resource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - 
case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EntityRefs = append(m.EntityRefs, &v1.EntityRef{}) - if err := m.EntityRefs[len(m.EntityRefs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResource(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResource - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResource - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResource 
- } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go deleted file mode 100644 index 6c7767be1b3..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go +++ /dev/null @@ -1,3073 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/trace/v1/trace.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data" - v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// SpanFlags represents constants used to interpret the -// Span.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. To extract the bit-field, for example, use an -// expression like: -// -// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) -// -// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. -// -// Note that Span flags were introduced in version 1.1 of the -// OpenTelemetry protocol. Older Span producers do not set this -// field, consequently consumers should not rely on the absence of a -// particular flag bit to indicate the presence of a particular feature. -type SpanFlags int32 - -const ( - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0 - // Bits 0-7 are used for trace flags. - SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. 
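Editor's note, outside the diff: the SpanFlags comment above spells out how the field is read — bitwise AND against the documented masks (255 for the W3C trace flags, 256 for "remoteness is known", 512 for "is remote"). A minimal sketch of that usage, with the mask values copied from this enum and the names shortened for the example:

	package main

	import "fmt"

	// Mask values as defined by the SpanFlags enum in the deleted file.
	const (
		traceFlagsMask  = 255 // bits 0-7: W3C trace flags
		hasIsRemoteMask = 256 // bit 8: whether remoteness is known
		isRemoteMask    = 512 // bit 9: whether the parent/link is remote
	)

	func main() {
		flags := uint32(0x301) // example: sampled (bit 0), bits 8 and 9 set
		fmt.Println("trace flags:", flags&traceFlagsMask)            // 1
		fmt.Println("remoteness known:", flags&hasIsRemoteMask != 0) // true
		fmt.Println("is remote:", flags&isRemoteMask != 0)           // true
	}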
- SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256 - SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512 -) - -var SpanFlags_name = map[int32]string{ - 0: "SPAN_FLAGS_DO_NOT_USE", - 255: "SPAN_FLAGS_TRACE_FLAGS_MASK", - 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", - 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", -} - -var SpanFlags_value = map[string]int32{ - "SPAN_FLAGS_DO_NOT_USE": 0, - "SPAN_FLAGS_TRACE_FLAGS_MASK": 255, - "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256, - "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512, -} - -func (x SpanFlags) String() string { - return proto.EnumName(SpanFlags_name, int32(x)) -} - -func (SpanFlags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{0} -} - -// SpanKind is the type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type Span_SpanKind int32 - -const ( - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. - Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. - Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - Span_SPAN_KIND_SERVER Span_SpanKind = 2 - // Indicates that the span describes a request to some remote service. - Span_SPAN_KIND_CLIENT Span_SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. - Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. - Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 -) - -var Span_SpanKind_name = map[int32]string{ - 0: "SPAN_KIND_UNSPECIFIED", - 1: "SPAN_KIND_INTERNAL", - 2: "SPAN_KIND_SERVER", - 3: "SPAN_KIND_CLIENT", - 4: "SPAN_KIND_PRODUCER", - 5: "SPAN_KIND_CONSUMER", -} - -var Span_SpanKind_value = map[string]int32{ - "SPAN_KIND_UNSPECIFIED": 0, - "SPAN_KIND_INTERNAL": 1, - "SPAN_KIND_SERVER": 2, - "SPAN_KIND_CLIENT": 3, - "SPAN_KIND_PRODUCER": 4, - "SPAN_KIND_CONSUMER": 5, -} - -func (x Span_SpanKind) String() string { - return proto.EnumName(Span_SpanKind_name, int32(x)) -} - -func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3, 0} -} - -// For the semantics of status codes see -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status -type Status_StatusCode int32 - -const ( - // The default status. - Status_STATUS_CODE_UNSET Status_StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. - Status_STATUS_CODE_OK Status_StatusCode = 1 - // The Span contains an error. 
- Status_STATUS_CODE_ERROR Status_StatusCode = 2 -) - -var Status_StatusCode_name = map[int32]string{ - 0: "STATUS_CODE_UNSET", - 1: "STATUS_CODE_OK", - 2: "STATUS_CODE_ERROR", -} - -var Status_StatusCode_value = map[string]int32{ - "STATUS_CODE_UNSET": 0, - "STATUS_CODE_OK": 1, - "STATUS_CODE_ERROR": 2, -} - -func (x Status_StatusCode) String() string { - return proto.EnumName(Status_StatusCode_name, int32(x)) -} - -func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{4, 0} -} - -// TracesData represents the traces data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP traces data but do -// not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -type TracesData struct { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` -} - -func (m *TracesData) Reset() { *m = TracesData{} } -func (m *TracesData) String() string { return proto.CompactTextString(m) } -func (*TracesData) ProtoMessage() {} -func (*TracesData) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{0} -} -func (m *TracesData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TracesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TracesData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TracesData) XXX_Merge(src proto.Message) { - xxx_messageInfo_TracesData.Merge(m, src) -} -func (m *TracesData) XXX_Size() int { - return m.Size() -} -func (m *TracesData) XXX_DiscardUnknown() { - xxx_messageInfo_TracesData.DiscardUnknown(m) -} - -var xxx_messageInfo_TracesData proto.InternalMessageInfo - -func (m *TracesData) GetResourceSpans() []*ResourceSpans { - if m != nil { - return m.ResourceSpans - } - return nil -} - -// A collection of ScopeSpans from a Resource. -type ResourceSpans struct { - DeprecatedScopeSpans []*ScopeSpans `protobuf:"bytes,1000,rep,name=deprecated_scope_spans,json=deprecatedScopeSpans,proto3" json:"deprecated_scope_spans,omitempty"` - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` - // A list of ScopeSpans that originate from a resource. - ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. 
To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_spans" field which have their own schema_url field. - SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ResourceSpans) Reset() { *m = ResourceSpans{} } -func (m *ResourceSpans) String() string { return proto.CompactTextString(m) } -func (*ResourceSpans) ProtoMessage() {} -func (*ResourceSpans) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{1} -} -func (m *ResourceSpans) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceSpans) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceSpans.Merge(m, src) -} -func (m *ResourceSpans) XXX_Size() int { - return m.Size() -} -func (m *ResourceSpans) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceSpans.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo - -func (m *ResourceSpans) GetDeprecatedScopeSpans() []*ScopeSpans { - if m != nil { - return m.DeprecatedScopeSpans - } - return nil -} - -func (m *ResourceSpans) GetResource() v1.Resource { - if m != nil { - return m.Resource - } - return v1.Resource{} -} - -func (m *ResourceSpans) GetScopeSpans() []*ScopeSpans { - if m != nil { - return m.ScopeSpans - } - return nil -} - -func (m *ResourceSpans) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A collection of Spans produced by an InstrumentationScope. -type ScopeSpans struct { - // The instrumentation scope information for the spans in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` - // A list of Spans that originate from an instrumentation scope. - Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. 
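Editor's note, outside the diff: TracesData, ResourceSpans and ScopeSpans above nest as resource → scope → span. Because these generated types live under an internal package and cannot be imported from outside the pdata module, the sketch below uses hypothetical stand-in structs that only mirror the nesting, to show the traversal order:

	package main

	import "fmt"

	// Hypothetical stand-ins mirroring the shape of the generated types;
	// the real ones are not importable directly.
	type Span struct{ Name string }
	type ScopeSpans struct{ Spans []*Span }
	type ResourceSpans struct{ ScopeSpans []*ScopeSpans }
	type TracesData struct{ ResourceSpans []*ResourceSpans }

	func main() {
		td := TracesData{ResourceSpans: []*ResourceSpans{{
			ScopeSpans: []*ScopeSpans{{Spans: []*Span{{Name: "GET /users"}}}},
		}}}
		for _, rs := range td.ResourceSpans {
			for _, ss := range rs.ScopeSpans {
				for _, s := range ss.Spans {
					fmt.Println(s.Name) // one line per span, here "GET /users"
				}
			}
		}
	}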
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ScopeSpans) Reset() { *m = ScopeSpans{} } -func (m *ScopeSpans) String() string { return proto.CompactTextString(m) } -func (*ScopeSpans) ProtoMessage() {} -func (*ScopeSpans) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2} -} -func (m *ScopeSpans) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScopeSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ScopeSpans.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ScopeSpans) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScopeSpans.Merge(m, src) -} -func (m *ScopeSpans) XXX_Size() int { - return m.Size() -} -func (m *ScopeSpans) XXX_DiscardUnknown() { - xxx_messageInfo_ScopeSpans.DiscardUnknown(m) -} - -var xxx_messageInfo_ScopeSpans proto.InternalMessageInfo - -func (m *ScopeSpans) GetScope() v11.InstrumentationScope { - if m != nil { - return m.Scope - } - return v11.InstrumentationScope{} -} - -func (m *ScopeSpans) GetSpans() []*Span { - if m != nil { - return m.Spans - } - return nil -} - -func (m *ScopeSpans) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A Span represents a single operation performed by a single component of the system. -// -// The next available field id is 17. -type Span struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - ParentSpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"parent_span_id"` - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. 
To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether a span's parent - // is remote. The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // When creating span messages, if the message is logically forwarded from another source - // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - // be set to zero. - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // - // [Optional]. - Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"` - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // Empty value is equivalent to an unknown span name. - // - // This field is required. - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. 
Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "example.com/myattribute": true - // "example.com/score": 10.239 - // - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. - Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // events is a collection of Event items. - Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. - DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. - Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). 
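Editor's note, outside the diff: StartTimeUnixNano and EndTimeUnixNano, documented above, are UNIX epoch nanoseconds with end_time >= start_time expected, so span duration is a plain subtraction. A small sketch with made-up timestamps:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Made-up values in the units the fields use: nanoseconds since
		// 1970-01-01T00:00:00Z. The documented contract is only end >= start.
		start := uint64(1_700_000_000_000_000_000)
		end := start + uint64(250*time.Millisecond)
		fmt.Println(time.Duration(end - start)) // 250ms
	}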
- Status Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status"` -} - -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3} -} -func (m *Span) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Span.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Span) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span.Merge(m, src) -} -func (m *Span) XXX_Size() int { - return m.Size() -} -func (m *Span) XXX_DiscardUnknown() { - xxx_messageInfo_Span.DiscardUnknown(m) -} - -var xxx_messageInfo_Span proto.InternalMessageInfo - -func (m *Span) GetTraceState() string { - if m != nil { - return m.TraceState - } - return "" -} - -func (m *Span) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -func (m *Span) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Span) GetKind() Span_SpanKind { - if m != nil { - return m.Kind - } - return Span_SPAN_KIND_UNSPECIFIED -} - -func (m *Span) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *Span) GetEndTimeUnixNano() uint64 { - if m != nil { - return m.EndTimeUnixNano - } - return 0 -} - -func (m *Span) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *Span) GetEvents() []*Span_Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *Span) GetDroppedEventsCount() uint32 { - if m != nil { - return m.DroppedEventsCount - } - return 0 -} - -func (m *Span) GetLinks() []*Span_Link { - if m != nil { - return m.Links - } - return nil -} - -func (m *Span) GetDroppedLinksCount() uint32 { - if m != nil { - return m.DroppedLinksCount - } - return 0 -} - -func (m *Span) GetStatus() Status { - if m != nil { - return m.Status - } - return Status{} -} - -// Event is a time-stamped annotation of the span, consisting of user-supplied -// text description and key-value pairs. -type Span_Event struct { - // time_unix_nano is the time the event occurred. - TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // name of the event. - // This field is semantically required to be set to non-empty string. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. 
- // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. - Attributes []v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` -} - -func (m *Span_Event) Reset() { *m = Span_Event{} } -func (m *Span_Event) String() string { return proto.CompactTextString(m) } -func (*Span_Event) ProtoMessage() {} -func (*Span_Event) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3, 0} -} -func (m *Span_Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Span_Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Event.Merge(m, src) -} -func (m *Span_Event) XXX_Size() int { - return m.Size() -} -func (m *Span_Event) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Event proto.InternalMessageInfo - -func (m *Span_Event) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *Span_Event) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Span_Event) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span_Event) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. -type Span_Link struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` - // The trace_state associated with the link. - TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. 
- // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. - Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether the link is remote. - // The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - // - // [Optional]. - Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"` -} - -func (m *Span_Link) Reset() { *m = Span_Link{} } -func (m *Span_Link) String() string { return proto.CompactTextString(m) } -func (*Span_Link) ProtoMessage() {} -func (*Span_Link) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3, 1} -} -func (m *Span_Link) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Span_Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Link.Merge(m, src) -} -func (m *Span_Link) XXX_Size() int { - return m.Size() -} -func (m *Span_Link) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Link proto.InternalMessageInfo - -func (m *Span_Link) GetTraceState() string { - if m != nil { - return m.TraceState - } - return "" -} - -func (m *Span_Link) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span_Link) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *Span_Link) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -// The Status type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -type Status struct { - // A developer-facing human readable error message. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // The status code. 
- Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{4} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return m.Size() -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Status) GetCode() Status_StatusCode { - if m != nil { - return m.Code - } - return Status_STATUS_CODE_UNSET -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.trace.v1.SpanFlags", SpanFlags_name, SpanFlags_value) - proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) - proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value) - proto.RegisterType((*TracesData)(nil), "opentelemetry.proto.trace.v1.TracesData") - proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans") - proto.RegisterType((*ScopeSpans)(nil), "opentelemetry.proto.trace.v1.ScopeSpans") - proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span") - proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event") - proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link") - proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601) -} - -var fileDescriptor_5c407ac9c675a601 = []byte{ - // 1112 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0x3a, 0x6b, 0x3b, 0x79, 0x49, 0xdc, 0xed, 0xe0, 0x56, 0x4b, 0x28, 0x8e, 0xb1, 0x0a, - 0x98, 0x56, 0xb2, 0x49, 0x7b, 0x29, 0x07, 0x44, 0x1d, 0x7b, 0x03, 0x8b, 0x13, 0x3b, 0x9a, 0x5d, - 0x47, 0x80, 0x90, 0x96, 0xad, 0x77, 0x6a, 0x56, 0xb1, 0x67, 0xad, 0xdd, 0x71, 0xd4, 0xde, 0xf8, - 0x13, 0xb8, 0x22, 0x71, 0x47, 0x02, 0xce, 0xdc, 0xb8, 0x57, 0x9c, 0x7a, 0x44, 0x1c, 0x2a, 0x94, - 0x5c, 0xf8, 0x2f, 0x8a, 0x66, 0x66, 0xd7, 0x5e, 0x47, 0x91, 0xd3, 0x48, 0xf4, 0xc2, 0x25, 0x99, - 0x79, 0x3f, 0xbe, 0xef, 0x7b, 0x6f, 0xde, 0x8c, 0x17, 0x6a, 0xc1, 0x84, 0x50, 0x46, 0x46, 0x64, - 0x4c, 0x58, 0xf8, 0xb4, 0x31, 0x09, 0x03, 0x16, 0x34, 0x58, 0xe8, 0x0e, 0x48, 0xe3, 0x64, 0x47, - 0x2e, 0xea, 0xc2, 0x88, 0x6e, 0x2d, 0x44, 0x4a, 0x63, 0x5d, 0x06, 0x9c, 0xec, 0x6c, 0x95, 0x86, - 0xc1, 0x30, 0x90, 0xd9, 0x7c, 0x25, 0xdd, 0x5b, 0x77, 0x2e, 0x42, 0x1f, 0x04, 0xe3, 0x71, 0x40, - 0x39, 0xbc, 0x5c, 0xc5, 0xb1, 0xf5, 0x8b, 0x62, 0x43, 0x12, 0x05, 0xd3, 0x50, 0x8a, 0x49, 0xd6, - 0x32, 0xbe, 0xfa, 0x0d, 0x80, 0xcd, 0xd9, 0xa3, 0xb6, 0xcb, 0x5c, 
0x84, 0xa1, 0x98, 0xf8, 0x9d, - 0x68, 0xe2, 0xd2, 0x48, 0x57, 0x2a, 0x2b, 0xb5, 0xf5, 0x7b, 0x77, 0xeb, 0xcb, 0x64, 0xd7, 0x71, - 0x9c, 0x63, 0xf1, 0x14, 0xbc, 0x19, 0xa6, 0xb7, 0xd5, 0x9f, 0xb2, 0xb0, 0xb9, 0x10, 0x80, 0x1c, - 0xb8, 0xe9, 0x91, 0x49, 0x48, 0x06, 0x2e, 0x23, 0x9e, 0x13, 0x0d, 0x82, 0x49, 0xc2, 0xf6, 0x4f, - 0x41, 0xd0, 0xd5, 0x96, 0xd3, 0x59, 0x3c, 0x43, 0x72, 0x95, 0xe6, 0x40, 0x73, 0x2b, 0xea, 0xc0, - 0x6a, 0xa2, 0x41, 0x57, 0x2a, 0x4a, 0x6d, 0xfd, 0xde, 0x07, 0x17, 0x22, 0xce, 0x7a, 0x91, 0xaa, - 0x61, 0x57, 0x7d, 0xf6, 0x62, 0x3b, 0x83, 0x67, 0x00, 0xc8, 0x84, 0xf5, 0xb4, 0xc4, 0xec, 0x15, - 0x15, 0x42, 0x34, 0xd7, 0xf5, 0x36, 0x40, 0x34, 0xf8, 0x96, 0x8c, 0x5d, 0x67, 0x1a, 0x8e, 0xf4, - 0x95, 0x8a, 0x52, 0x5b, 0xc3, 0x6b, 0xd2, 0xd2, 0x0f, 0x47, 0xd5, 0xdf, 0x14, 0x80, 0x54, 0x15, - 0x3d, 0xc8, 0x89, 0xdc, 0xb8, 0x84, 0xfb, 0x17, 0x52, 0xc6, 0x87, 0x7f, 0xb2, 0x53, 0x37, 0x69, - 0xc4, 0xc2, 0xe9, 0x98, 0x50, 0xe6, 0x32, 0x3f, 0xa0, 0x02, 0x28, 0x2e, 0x46, 0xe2, 0xa0, 0x07, - 0x90, 0x4b, 0xd7, 0x50, 0xbd, 0xa4, 0x86, 0x89, 0x4b, 0xb1, 0x4c, 0xb8, 0x4c, 0xf8, 0xaf, 0x9b, - 0xa0, 0xf2, 0x70, 0xf4, 0x35, 0xac, 0x8a, 0x7c, 0xc7, 0xf7, 0x84, 0xea, 0x8d, 0xdd, 0x26, 0x17, - 0xf0, 0xd7, 0x8b, 0xed, 0x8f, 0x86, 0xc1, 0x39, 0x3a, 0x9f, 0xcf, 0xf0, 0x68, 0x44, 0x06, 0x2c, - 0x08, 0x1b, 0x13, 0xcf, 0x65, 0x6e, 0xc3, 0xa7, 0x8c, 0x84, 0xd4, 0x1d, 0x35, 0xf8, 0xae, 0x2e, - 0xe6, 0xd2, 0x6c, 0xe3, 0x82, 0x80, 0x34, 0x3d, 0xf4, 0x25, 0x14, 0xb8, 0x1c, 0x0e, 0x9e, 0x15, - 0xe0, 0x0f, 0x63, 0xf0, 0x07, 0x57, 0x07, 0xe7, 0x72, 0xcd, 0x36, 0xce, 0x73, 0x40, 0xd3, 0x43, - 0xdb, 0xb0, 0x2e, 0x85, 0x47, 0xcc, 0x65, 0x24, 0xae, 0x10, 0x84, 0xc9, 0xe2, 0x16, 0xf4, 0x18, - 0x8a, 0x13, 0x37, 0x24, 0x94, 0x39, 0x89, 0x04, 0xf5, 0x3f, 0x92, 0xb0, 0x21, 0x71, 0x2d, 0x29, - 0xa4, 0x04, 0xb9, 0xc7, 0x23, 0x77, 0x18, 0xe9, 0x5a, 0x45, 0xa9, 0x15, 0xb0, 0xdc, 0x20, 0x04, - 0x2a, 0x75, 0xc7, 0x44, 0xcf, 0x09, 0x5d, 0x62, 0x8d, 0x3e, 0x01, 0xf5, 0xd8, 0xa7, 0x9e, 0x9e, - 0xaf, 0x28, 0xb5, 0xe2, 0x65, 0x37, 0x94, 0xa3, 0x8b, 0x3f, 0x1d, 0x9f, 0x7a, 0x58, 0x24, 0xa2, - 0x06, 0x94, 0x22, 0xe6, 0x86, 0xcc, 0x61, 0xfe, 0x98, 0x38, 0x53, 0xea, 0x3f, 0x71, 0xa8, 0x4b, - 0x03, 0xbd, 0x50, 0x51, 0x6a, 0x79, 0x7c, 0x5d, 0xf8, 0x6c, 0x7f, 0x4c, 0xfa, 0xd4, 0x7f, 0xd2, - 0x75, 0x69, 0x80, 0xee, 0x02, 0x22, 0xd4, 0x3b, 0x1f, 0xbe, 0x2a, 0xc2, 0xaf, 0x11, 0xea, 0x2d, - 0x04, 0x1f, 0x00, 0xb8, 0x8c, 0x85, 0xfe, 0xa3, 0x29, 0x23, 0x91, 0xbe, 0x26, 0x26, 0xee, 0xfd, - 0x4b, 0x46, 0xb8, 0x43, 0x9e, 0x1e, 0xb9, 0xa3, 0x69, 0x32, 0xb6, 0x29, 0x00, 0xf4, 0x00, 0x74, - 0x2f, 0x0c, 0x26, 0x13, 0xe2, 0x39, 0x73, 0xab, 0x33, 0x08, 0xa6, 0x94, 0xe9, 0x50, 0x51, 0x6a, - 0x9b, 0xf8, 0x66, 0xec, 0x6f, 0xce, 0xdc, 0x2d, 0xee, 0x45, 0x0f, 0x21, 0x4f, 0x4e, 0x08, 0x65, - 0x91, 0xbe, 0xfe, 0x4a, 0x57, 0x97, 0x77, 0xca, 0xe0, 0x09, 0x38, 0xce, 0x43, 0x1f, 0x42, 0x29, - 0xe1, 0x96, 0x96, 0x98, 0x77, 0x43, 0xf0, 0xa2, 0xd8, 0x27, 0x72, 0x62, 0xce, 0x8f, 0x21, 0x37, - 0xf2, 0xe9, 0x71, 0xa4, 0x6f, 0x2e, 0xa9, 0x7b, 0x91, 0x72, 0xdf, 0xa7, 0xc7, 0x58, 0x66, 0xa1, - 0x3a, 0xbc, 0x91, 0x10, 0x0a, 0x43, 0xcc, 0x57, 0x14, 0x7c, 0xd7, 0x63, 0x17, 0x4f, 0x88, 0xe9, - 0x76, 0x21, 0xcf, 0xe7, 0x76, 0x1a, 0xe9, 0xd7, 0xc4, 0x53, 0x71, 0xfb, 0x12, 0x3e, 0x11, 0x1b, - 0x37, 0x39, 0xce, 0xdc, 0xfa, 0x43, 0x81, 0x9c, 0x28, 0x01, 0xdd, 0x86, 0xe2, 0xb9, 0x23, 0x56, - 0xc4, 0x11, 0x6f, 0xb0, 0xf4, 0xf9, 0x26, 0x23, 0x99, 0x4d, 0x8d, 0xe4, 0xe2, 0x99, 0xaf, 0xbc, - 0xce, 0x33, 0x57, 0x97, 0x9d, 0xf9, 0xd6, 0xcb, 0x2c, 0xa8, 0xbc, 0x3f, 0xff, 0xe3, 0x07, 
0x69, - 0xb1, 0xd7, 0xea, 0xeb, 0xec, 0x75, 0x6e, 0xe9, 0xfd, 0x9a, 0xbd, 0x58, 0xf9, 0xd4, 0x8b, 0x55, - 0xfd, 0x41, 0x81, 0xd5, 0xe4, 0xbd, 0x41, 0x6f, 0xc2, 0x0d, 0xeb, 0xb0, 0xd9, 0x75, 0x3a, 0x66, - 0xb7, 0xed, 0xf4, 0xbb, 0xd6, 0xa1, 0xd1, 0x32, 0xf7, 0x4c, 0xa3, 0xad, 0x65, 0xd0, 0x4d, 0x40, - 0x73, 0x97, 0xd9, 0xb5, 0x0d, 0xdc, 0x6d, 0xee, 0x6b, 0x0a, 0x2a, 0x81, 0x36, 0xb7, 0x5b, 0x06, - 0x3e, 0x32, 0xb0, 0x96, 0x5d, 0xb4, 0xb6, 0xf6, 0x4d, 0xa3, 0x6b, 0x6b, 0x2b, 0x8b, 0x18, 0x87, - 0xb8, 0xd7, 0xee, 0xb7, 0x0c, 0xac, 0xa9, 0x8b, 0xf6, 0x56, 0xaf, 0x6b, 0xf5, 0x0f, 0x0c, 0xac, - 0xe5, 0xaa, 0xbf, 0x2b, 0x90, 0x97, 0x77, 0x00, 0xe9, 0x50, 0x18, 0x93, 0x28, 0x72, 0x87, 0xc9, - 0x20, 0x27, 0x5b, 0xd4, 0x02, 0x75, 0x10, 0x78, 0xb2, 0xf3, 0xc5, 0x7b, 0x8d, 0x57, 0xb9, 0x51, - 0xf1, 0xbf, 0x56, 0xe0, 0x11, 0x2c, 0x92, 0xab, 0x5d, 0x80, 0xb9, 0x0d, 0xdd, 0x80, 0xeb, 0x96, - 0xdd, 0xb4, 0xfb, 0x96, 0xd3, 0xea, 0xb5, 0x0d, 0xde, 0x08, 0xc3, 0xd6, 0x32, 0x08, 0x41, 0x31, - 0x6d, 0xee, 0x75, 0x34, 0xe5, 0x7c, 0xa8, 0x81, 0x71, 0x0f, 0x6b, 0xd9, 0xcf, 0xd5, 0x55, 0x45, - 0xcb, 0xde, 0xf9, 0x51, 0x81, 0x35, 0xde, 0xdb, 0x3d, 0xf1, 0xdb, 0x90, 0x34, 0x77, 0x6f, 0xbf, - 0xf9, 0xa9, 0xe5, 0xb4, 0x7b, 0x4e, 0xb7, 0x67, 0x3b, 0x7d, 0xcb, 0xd0, 0x32, 0xa8, 0x02, 0x6f, - 0xa5, 0x5c, 0x36, 0x6e, 0xb6, 0x8c, 0x78, 0x7d, 0xd0, 0xb4, 0x3a, 0xda, 0x4b, 0x05, 0xdd, 0x81, - 0x77, 0x53, 0x11, 0xad, 0x5e, 0xd7, 0x36, 0xbe, 0xb0, 0x9d, 0xcf, 0x9a, 0x96, 0x63, 0x5a, 0x0e, - 0x36, 0x0e, 0x7a, 0xb6, 0x21, 0x63, 0xbf, 0xcb, 0xa2, 0xf7, 0xe0, 0x9d, 0x0b, 0x62, 0xcf, 0xc7, - 0xa9, 0xbb, 0xbf, 0x28, 0xcf, 0x4e, 0xcb, 0xca, 0xf3, 0xd3, 0xb2, 0xf2, 0xf7, 0x69, 0x59, 0xf9, - 0xfe, 0xac, 0x9c, 0x79, 0x7e, 0x56, 0xce, 0xfc, 0x79, 0x56, 0xce, 0xc0, 0xb6, 0x1f, 0x2c, 0x6d, - 0xe4, 0xae, 0xfc, 0x18, 0x3d, 0xe4, 0xc6, 0x43, 0xe5, 0xab, 0xd6, 0x95, 0xaf, 0x91, 0xfc, 0xe0, - 0x1d, 0x12, 0x3a, 0xfb, 0xfa, 0xfe, 0x39, 0x7b, 0xab, 0x37, 0x21, 0xd4, 0x9e, 0x41, 0x08, 0x70, - 0x79, 0x97, 0xeb, 0x47, 0x3b, 0x8f, 0xf2, 0x22, 0xe3, 0xfe, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, - 0xfd, 0xbe, 0x84, 0xc3, 0xc3, 0x0b, 0x00, 0x00, -} - -func (m *TracesData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TracesData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TracesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceSpans) > 0 { - for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResourceSpans) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceSpans) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DeprecatedScopeSpans) > 0 { - for iNdEx := len(m.DeprecatedScopeSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.DeprecatedScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3e - i-- - dAtA[i] = 0xc2 - } - } - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.ScopeSpans) > 0 { - for iNdEx := len(m.ScopeSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ScopeSpans) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScopeSpans) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScopeSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.Spans) > 0 { - for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Span) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Span) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Flags != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x85 - } - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - if m.DroppedLinksCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedLinksCount)) - i-- - dAtA[i] = 0x70 - } - if len(m.Links) > 0 { - for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - } - if m.DroppedEventsCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedEventsCount)) - i-- - dAtA[i] = 0x60 - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x50 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - } - if m.EndTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano)) - i-- - dAtA[i] = 0x41 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x39 - } - if m.Kind != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.Kind)) - i-- - dAtA[i] = 0x30 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x2a - } - { - size := m.ParentSpanId.Size() - i -= size - if _, err := m.ParentSpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.TraceState) > 0 { - i -= len(m.TraceState) - copy(dAtA[i:], m.TraceState) - i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) - i-- - dAtA[i] = 0x1a - } - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Span_Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Span_Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Span_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DroppedAttributesCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x20 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *Span_Link) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Span_Link) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Span_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Flags != 0 { - i -= 4 - 
encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags)) - i-- - dAtA[i] = 0x35 - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x28 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.TraceState) > 0 { - i -= len(m.TraceState) - copy(dAtA[i:], m.TraceState) - i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) - i-- - dAtA[i] = 0x1a - } - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Status) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Status) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Code != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x18 - } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintTrace(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} - -func encodeVarintTrace(dAtA []byte, offset int, v uint64) int { - offset -= sovTrace(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *TracesData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceSpans) > 0 { - for _, e := range m.ResourceSpans { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - return n -} - -func (m *ResourceSpans) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovTrace(uint64(l)) - if len(m.ScopeSpans) > 0 { - for _, e := range m.ScopeSpans { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - if len(m.DeprecatedScopeSpans) > 0 { - for _, e := range m.DeprecatedScopeSpans { - l = e.Size() - n += 2 + l + sovTrace(uint64(l)) - } - } - return n -} - -func (m *ScopeSpans) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Scope.Size() - n += 1 + l + sovTrace(uint64(l)) - if len(m.Spans) > 0 { - for _, e := range m.Spans { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - return n -} - -func (m *Span) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.TraceId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = m.SpanId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = len(m.TraceState) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - l = m.ParentSpanId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = len(m.Name) - if l > 0 { - n += 1 + l 
+ sovTrace(uint64(l)) - } - if m.Kind != 0 { - n += 1 + sovTrace(uint64(m.Kind)) - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.EndTimeUnixNano != 0 { - n += 9 - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedEventsCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedEventsCount)) - } - if len(m.Links) > 0 { - for _, e := range m.Links { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedLinksCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedLinksCount)) - } - l = m.Status.Size() - n += 1 + l + sovTrace(uint64(l)) - if m.Flags != 0 { - n += 6 - } - return n -} - -func (m *Span_Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TimeUnixNano != 0 { - n += 9 - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) - } - return n -} - -func (m *Span_Link) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.TraceId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = m.SpanId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = len(m.TraceState) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) - } - if m.Flags != 0 { - n += 5 - } - return n -} - -func (m *Status) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Message) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - if m.Code != 0 { - n += 1 + sovTrace(uint64(m.Code)) - } - return n -} - -func sovTrace(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTrace(x uint64) (n int) { - return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *TracesData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TracesData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TracesData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceSpans = 
append(m.ResourceSpans, &ResourceSpans{}) - if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceSpans) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceSpans: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceSpans: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScopeSpans = append(m.ScopeSpans, &ScopeSpans{}) - if err := m.ScopeSpans[len(m.ScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 1000: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedScopeSpans = append(m.DeprecatedScopeSpans, &ScopeSpans{}) - if err := m.DeprecatedScopeSpans[len(m.DeprecatedScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScopeSpans) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopeSpans: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScopeSpans: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Spans = append(m.Spans, &Span{}) - if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Span) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Span: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TraceState = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return 
ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ParentSpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - m.Kind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kind |= Span_SpanKind(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 8: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType) - } - m.EndTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Events = append(m.Events, &Span_Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType) - } - m.DroppedEventsCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedEventsCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Links = append(m.Links, &Span_Link{}) - if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType) - } - m.DroppedLinksCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedLinksCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) - } - m.Flags = 0 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Span_Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal 
tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Span_Link) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Link: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TraceState = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) - } - m.Flags = 0 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Status) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - m.Code = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Code |= Status_StatusCode(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTrace(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTrace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTrace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTrace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTrace - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTrace - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTrace - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group") -) diff --git 
a/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go
deleted file mode 100644
index 597e071ddaa..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
-	"encoding/hex"
-	"errors"
-
-	"github.com/gogo/protobuf/proto"
-
-	"go.opentelemetry.io/collector/pdata/internal/json"
-)
-
-const spanIDSize = 8
-
-var (
-	errMarshalSpanID = errors.New("marshal: invalid buffer length for SpanID")
-	errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length")
-)
-
-// SpanID is a custom data type that is used for all span_id fields in OTLP
-// Protobuf messages.
-type SpanID [spanIDSize]byte
-
-var _ proto.Sizer = (*SpanID)(nil)
-
-// Size returns the size of the data to serialize.
-func (sid SpanID) Size() int {
-	if sid.IsEmpty() {
-		return 0
-	}
-	return spanIDSize
-}
-
-// IsEmpty returns true if id doesn't contain at least one non-zero byte.
-func (sid SpanID) IsEmpty() bool {
-	return sid == [spanIDSize]byte{}
-}
-
-// MarshalTo converts span ID into a binary representation. Called by Protobuf serialization.
-func (sid SpanID) MarshalTo(data []byte) (n int, err error) {
-	if sid.IsEmpty() {
-		return 0, nil
-	}
-
-	if len(data) < spanIDSize {
-		return 0, errMarshalSpanID
-	}
-
-	return copy(data, sid[:]), nil
-}
-
-// Unmarshal inflates this span ID from binary representation. Called by Protobuf serialization.
-func (sid *SpanID) Unmarshal(data []byte) error {
-	if len(data) == 0 {
-		*sid = [spanIDSize]byte{}
-		return nil
-	}
-
-	if len(data) != spanIDSize {
-		return errUnmarshalSpanID
-	}
-
-	copy(sid[:], data)
-	return nil
-}
-
-// MarshalJSONStream converts SpanID into a hex string.
-func (sid SpanID) MarshalJSONStream(dest *json.Stream) {
-	dest.WriteString(hex.EncodeToString(sid[:]))
-}
-
-// UnmarshalJSONIter decodes SpanID from hex string.
-func (sid *SpanID) UnmarshalJSONIter(iter *json.Iterator) {
-	*sid = [spanIDSize]byte{}
-	unmarshalJSON(sid[:], iter)
-}
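Note: the deleted SpanID type above (and TraceID just below) relies on a convention worth calling out: an all-zero ID reports Size() == 0 and MarshalTo writes nothing, so empty IDs are omitted from the wire entirely. A minimal stand-alone sketch of that behavior, using local lowercase names for illustration rather than the vendored API:

package main

import (
	"errors"
	"fmt"
)

const spanIDSize = 8

// spanID mirrors the shape of the vendored SpanID type for illustration only.
type spanID [spanIDSize]byte

func (sid spanID) isEmpty() bool { return sid == spanID{} }

// size reports 0 for an all-zero ID, so empty IDs serialize to zero bytes.
func (sid spanID) size() int {
	if sid.isEmpty() {
		return 0
	}
	return spanIDSize
}

// unmarshal accepts either zero bytes (empty ID) or exactly spanIDSize bytes.
func (sid *spanID) unmarshal(data []byte) error {
	if len(data) == 0 {
		*sid = spanID{}
		return nil
	}
	if len(data) != spanIDSize {
		return errors.New("unmarshal: invalid SpanID length")
	}
	copy(sid[:], data)
	return nil
}

func main() {
	var id spanID
	fmt.Println(id.size()) // 0: an empty ID contributes nothing to the payload
	_ = id.unmarshal([]byte{1, 2, 3, 4, 5, 6, 7, 8})
	fmt.Println(id.size(), id) // 8 [1 2 3 4 5 6 7 8]
}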
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go
deleted file mode 100644
index 0e7c98ac910..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
-	"encoding/hex"
-	"errors"
-
-	"github.com/gogo/protobuf/proto"
-
-	"go.opentelemetry.io/collector/pdata/internal/json"
-)
-
-const traceIDSize = 16
-
-var (
-	errMarshalTraceID = errors.New("marshal: invalid buffer length for TraceID")
-	errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length")
-)
-
-// TraceID is a custom data type that is used for all trace_id fields in OTLP
-// Protobuf messages.
-type TraceID [traceIDSize]byte
-
-var _ proto.Sizer = (*TraceID)(nil)
-
-// Size returns the size of the data to serialize.
-func (tid TraceID) Size() int {
-	if tid.IsEmpty() {
-		return 0
-	}
-	return traceIDSize
-}
-
-// IsEmpty returns true if id doesn't contain at least one non-zero byte.
-func (tid TraceID) IsEmpty() bool {
-	return tid == [traceIDSize]byte{}
-}
-
-// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization.
-func (tid TraceID) MarshalTo(data []byte) (n int, err error) {
-	if tid.IsEmpty() {
-		return 0, nil
-	}
-
-	if len(data) < traceIDSize {
-		return 0, errMarshalTraceID
-	}
-
-	return copy(data, tid[:]), nil
-}
-
-// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization.
-func (tid *TraceID) Unmarshal(data []byte) error {
-	if len(data) == 0 {
-		*tid = [traceIDSize]byte{}
-		return nil
-	}
-
-	if len(data) != traceIDSize {
-		return errUnmarshalTraceID
-	}
-
-	copy(tid[:], data)
-	return nil
-}
-
-// MarshalJSONStream converts TraceID into a hex string.
-func (tid TraceID) MarshalJSONStream(dest *json.Stream) {
-	dest.WriteString(hex.EncodeToString(tid[:]))
-}
-
-// UnmarshalJSONIter decodes TraceID from hex string.
-func (tid *TraceID) UnmarshalJSONIter(iter *json.Iterator) {
-	*tid = [traceIDSize]byte{}
-	unmarshalJSON(tid[:], iter)
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go
new file mode 100644
index 00000000000..6c3a7208dbc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+	AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED = AggregationTemporality(0)
+	AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA = AggregationTemporality(1)
+	AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE = AggregationTemporality(2)
+)
+
+// AggregationTemporality defines how a metric aggregator reports aggregated values.
+// It describes how those values relate to the time interval over which they are aggregated.
+type AggregationTemporality int32
+
+var AggregationTemporality_name = map[int32]string{
+	0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
+	1: "AGGREGATION_TEMPORALITY_DELTA",
+	2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
+}
+
+var AggregationTemporality_value = map[string]int32{
+	"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
+	"AGGREGATION_TEMPORALITY_DELTA": 1,
+	"AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
+}
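Note: the paired name/value maps in these generated enum files are the standard protobuf-Go pattern for formatting an enum as its symbolic name and for parsing the name back to a value. A small self-contained sketch of how such maps are typically consumed (local names only, not the vendored package):

package main

import "fmt"

type AggregationTemporality int32

var aggregationTemporalityName = map[int32]string{
	0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
	1: "AGGREGATION_TEMPORALITY_DELTA",
	2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
}

var aggregationTemporalityValue = map[string]int32{
	"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
	"AGGREGATION_TEMPORALITY_DELTA": 1,
	"AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
}

// String renders the enum via the name map, with a numeric fallback
// for values the map does not know about.
func (t AggregationTemporality) String() string {
	if s, ok := aggregationTemporalityName[int32(t)]; ok {
		return s
	}
	return fmt.Sprintf("AggregationTemporality(%d)", int32(t))
}

// parseAggregationTemporality is the inverse lookup via the value map.
func parseAggregationTemporality(s string) (AggregationTemporality, bool) {
	v, ok := aggregationTemporalityValue[s]
	return AggregationTemporality(v), ok
}

func main() {
	fmt.Println(AggregationTemporality(1)) // AGGREGATION_TEMPORALITY_DELTA
	t, ok := parseAggregationTemporality("AGGREGATION_TEMPORALITY_CUMULATIVE")
	fmt.Println(t, ok) // AGGREGATION_TEMPORALITY_CUMULATIVE true
}

This is why an exact string match matters: a stray character in a map key would silently break the round-trip between the two maps.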
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go
new file mode 100644
index 00000000000..4ad9a5b29db
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+	SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED = SeverityNumber(0)
+	SeverityNumber_SEVERITY_NUMBER_TRACE = SeverityNumber(1)
+	SeverityNumber_SEVERITY_NUMBER_TRACE2 = SeverityNumber(2)
+	SeverityNumber_SEVERITY_NUMBER_TRACE3 = SeverityNumber(3)
+	SeverityNumber_SEVERITY_NUMBER_TRACE4 = SeverityNumber(4)
+	SeverityNumber_SEVERITY_NUMBER_DEBUG = SeverityNumber(5)
+	SeverityNumber_SEVERITY_NUMBER_DEBUG2 = SeverityNumber(6)
+	SeverityNumber_SEVERITY_NUMBER_DEBUG3 = SeverityNumber(7)
+	SeverityNumber_SEVERITY_NUMBER_DEBUG4 = SeverityNumber(8)
+	SeverityNumber_SEVERITY_NUMBER_INFO = SeverityNumber(9)
+	SeverityNumber_SEVERITY_NUMBER_INFO2 = SeverityNumber(10)
+	SeverityNumber_SEVERITY_NUMBER_INFO3 = SeverityNumber(11)
+	SeverityNumber_SEVERITY_NUMBER_INFO4 = SeverityNumber(12)
+	SeverityNumber_SEVERITY_NUMBER_WARN = SeverityNumber(13)
+	SeverityNumber_SEVERITY_NUMBER_WARN2 = SeverityNumber(14)
+	SeverityNumber_SEVERITY_NUMBER_WARN3 = SeverityNumber(15)
+	SeverityNumber_SEVERITY_NUMBER_WARN4 = SeverityNumber(16)
+	SeverityNumber_SEVERITY_NUMBER_ERROR = SeverityNumber(17)
+	SeverityNumber_SEVERITY_NUMBER_ERROR2 = SeverityNumber(18)
+	SeverityNumber_SEVERITY_NUMBER_ERROR3 = SeverityNumber(19)
+	SeverityNumber_SEVERITY_NUMBER_ERROR4 = SeverityNumber(20)
+	SeverityNumber_SEVERITY_NUMBER_FATAL = SeverityNumber(21)
+	SeverityNumber_SEVERITY_NUMBER_FATAL2 = SeverityNumber(22)
+	SeverityNumber_SEVERITY_NUMBER_FATAL3 = SeverityNumber(23)
+	SeverityNumber_SEVERITY_NUMBER_FATAL4 = SeverityNumber(24)
+)
+
+// SeverityNumber represents possible values for LogRecord.SeverityNumber.
+type SeverityNumber int32
+
+var SeverityNumber_name = map[int32]string{
+	0: "SEVERITY_NUMBER_UNSPECIFIED",
+	1: "SEVERITY_NUMBER_TRACE",
+	2: "SEVERITY_NUMBER_TRACE2",
+	3: "SEVERITY_NUMBER_TRACE3",
+	4: "SEVERITY_NUMBER_TRACE4",
+	5: "SEVERITY_NUMBER_DEBUG",
+	6: "SEVERITY_NUMBER_DEBUG2",
+	7: "SEVERITY_NUMBER_DEBUG3",
+	8: "SEVERITY_NUMBER_DEBUG4",
+	9: "SEVERITY_NUMBER_INFO",
+	10: "SEVERITY_NUMBER_INFO2",
+	11: "SEVERITY_NUMBER_INFO3",
+	12: "SEVERITY_NUMBER_INFO4",
+	13: "SEVERITY_NUMBER_WARN",
+	14: "SEVERITY_NUMBER_WARN2",
+	15: "SEVERITY_NUMBER_WARN3",
+	16: "SEVERITY_NUMBER_WARN4",
+	17: "SEVERITY_NUMBER_ERROR",
+	18: "SEVERITY_NUMBER_ERROR2",
+	19: "SEVERITY_NUMBER_ERROR3",
+	20: "SEVERITY_NUMBER_ERROR4",
+	21: "SEVERITY_NUMBER_FATAL",
+	22: "SEVERITY_NUMBER_FATAL2",
+	23: "SEVERITY_NUMBER_FATAL3",
+	24: "SEVERITY_NUMBER_FATAL4",
+}
+
+var SeverityNumber_value = map[string]int32{
+	"SEVERITY_NUMBER_UNSPECIFIED": 0,
+	"SEVERITY_NUMBER_TRACE": 1,
+	"SEVERITY_NUMBER_TRACE2": 2,
+	"SEVERITY_NUMBER_TRACE3": 3,
+	"SEVERITY_NUMBER_TRACE4": 4,
+	"SEVERITY_NUMBER_DEBUG": 5,
+	"SEVERITY_NUMBER_DEBUG2": 6,
+	"SEVERITY_NUMBER_DEBUG3": 7,
+	"SEVERITY_NUMBER_DEBUG4": 8,
+	"SEVERITY_NUMBER_INFO": 9,
+	"SEVERITY_NUMBER_INFO2": 10,
+	"SEVERITY_NUMBER_INFO3": 11,
+	"SEVERITY_NUMBER_INFO4": 12,
+	"SEVERITY_NUMBER_WARN": 13,
+	"SEVERITY_NUMBER_WARN2": 14,
+	"SEVERITY_NUMBER_WARN3": 15,
+	"SEVERITY_NUMBER_WARN4": 16,
+	"SEVERITY_NUMBER_ERROR": 17,
+	"SEVERITY_NUMBER_ERROR2": 18,
+	"SEVERITY_NUMBER_ERROR3": 19,
+	"SEVERITY_NUMBER_ERROR4": 20,
+	"SEVERITY_NUMBER_FATAL": 21,
+	"SEVERITY_NUMBER_FATAL2": 22,
+	"SEVERITY_NUMBER_FATAL3": 23,
+	"SEVERITY_NUMBER_FATAL4": 24,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go
new file mode 100644
index 00000000000..1632aadde2a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+	SpanKind_SPAN_KIND_UNSPECIFIED = SpanKind(0)
+	SpanKind_SPAN_KIND_INTERNAL = SpanKind(1)
+	SpanKind_SPAN_KIND_SERVER = SpanKind(2)
+	SpanKind_SPAN_KIND_CLIENT = SpanKind(3)
+	SpanKind_SPAN_KIND_PRODUCER = SpanKind(4)
+	SpanKind_SPAN_KIND_CONSUMER = SpanKind(5)
+)
+
+// SpanKind is the type of span.
+// Can be used to specify additional relationships between spans in addition to a parent/child relationship.
+type SpanKind int32
+
+var SpanKind_name = map[int32]string{
+	0: "SPAN_KIND_UNSPECIFIED",
+	1: "SPAN_KIND_INTERNAL",
+	2: "SPAN_KIND_SERVER",
+	3: "SPAN_KIND_CLIENT",
+	4: "SPAN_KIND_PRODUCER",
+	5: "SPAN_KIND_CONSUMER",
+}
+
+var SpanKind_value = map[string]int32{
+	"SPAN_KIND_UNSPECIFIED": 0,
+	"SPAN_KIND_INTERNAL": 1,
+	"SPAN_KIND_SERVER": 2,
+	"SPAN_KIND_CLIENT": 3,
+	"SPAN_KIND_PRODUCER": 4,
+	"SPAN_KIND_CONSUMER": 5,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go
new file mode 100644
index 00000000000..ac5aab75934
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+	StatusCode_STATUS_CODE_UNSET = StatusCode(0)
+	StatusCode_STATUS_CODE_OK = StatusCode(1)
+	StatusCode_STATUS_CODE_ERROR = StatusCode(2)
+)
+
+// StatusCode is the status of the span; for the semantics of the codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+var StatusCode_name = map[int32]string{
+	0: "STATUS_CODE_UNSET",
+	1: "STATUS_CODE_OK",
+	2: "STATUS_CODE_ERROR",
+}
+
+var StatusCode_value = map[string]int32{
+	"STATUS_CODE_UNSET": 0,
+	"STATUS_CODE_OK": 1,
+	"STATUS_CODE_ERROR": 2,
+}
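Note: the generated AnyValue file that follows models a protobuf oneof without gogoproto: the message holds a single `any`-typed field, each variant gets a small wrapper struct, and the typed getters do a type assertion and fall back to the zero value. A stripped-down sketch of that shape, with illustrative names only:

package main

import "fmt"

// oneof-style container: exactly one wrapper type is stored in Value.
type anyValue struct {
	Value any
}

type stringValue struct{ StringValue string }
type intValue struct{ IntValue int64 }

// Typed getters return the zero value when a different variant is set.
func (m *anyValue) GetStringValue() string {
	if v, ok := m.Value.(*stringValue); ok {
		return v.StringValue
	}
	return ""
}

func (m *anyValue) GetIntValue() int64 {
	if v, ok := m.Value.(*intValue); ok {
		return v.IntValue
	}
	return 0
}

func main() {
	v := &anyValue{Value: &stringValue{StringValue: "hello"}}
	fmt.Println(v.GetStringValue(), v.GetIntValue()) // hello 0
}

The zero-value fallback is what lets callers chain getters without nil checks, at the cost of not being able to distinguish "unset" from "set to the zero value" through the getters alone.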
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go
new file mode 100644
index 00000000000..103c4c5b443
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go
@@ -0,0 +1,770 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+	"sync"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+	"go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *AnyValue) GetValue() any {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type AnyValue_StringValue struct {
+	StringValue string
+}
+
+func (m *AnyValue) GetStringValue() string {
+	if v, ok := m.GetValue().(*AnyValue_StringValue); ok {
+		return v.StringValue
+	}
+	return ""
+}
+
+type AnyValue_BoolValue struct {
+	BoolValue bool
+}
+
+func (m *AnyValue) GetBoolValue() bool {
+	if v, ok := m.GetValue().(*AnyValue_BoolValue); ok {
+		return v.BoolValue
+	}
+	return false
+}
+
+type AnyValue_IntValue struct {
+	IntValue int64
+}
+
+func (m *AnyValue) GetIntValue() int64 {
+	if v, ok := m.GetValue().(*AnyValue_IntValue); ok {
+		return v.IntValue
+	}
+	return int64(0)
+}
+
+type AnyValue_DoubleValue struct {
+	DoubleValue float64
+}
+
+func (m *AnyValue) GetDoubleValue() float64 {
+	if v, ok := m.GetValue().(*AnyValue_DoubleValue); ok {
+		return v.DoubleValue
+	}
+	return float64(0)
+}
+
+type AnyValue_ArrayValue struct {
+	ArrayValue *ArrayValue
+}
+
+func (m *AnyValue) GetArrayValue() *ArrayValue {
+	if v, ok := m.GetValue().(*AnyValue_ArrayValue); ok {
+		return v.ArrayValue
+	}
+	return nil
+}
+
+type AnyValue_KvlistValue struct {
+	KvlistValue *KeyValueList
+}
+
+func (m *AnyValue) GetKvlistValue() *KeyValueList {
+	if v, ok := m.GetValue().(*AnyValue_KvlistValue); ok {
+		return v.KvlistValue
+	}
+	return nil
+}
+
+type AnyValue_BytesValue struct {
+	BytesValue []byte
+}
+
+func (m *AnyValue) GetBytesValue() []byte {
+	if v, ok := m.GetValue().(*AnyValue_BytesValue); ok {
+		return v.BytesValue
+	}
+	return nil
+}
+
+type AnyValue struct {
+	Value any
+}
+
+var (
+	protoPoolAnyValue = sync.Pool{
+		New: func() any {
+			return &AnyValue{}
+		},
+	}
+
+	ProtoPoolAnyValue_StringValue = sync.Pool{
+		New: func() any {
+			return &AnyValue_StringValue{}
+		},
+	}
+
+	ProtoPoolAnyValue_BoolValue = sync.Pool{
+		New: func() any {
+			return &AnyValue_BoolValue{}
+		},
+	}
+
+	ProtoPoolAnyValue_IntValue = sync.Pool{
+		New: func() any {
+			return &AnyValue_IntValue{}
+		},
+	}
+
+	ProtoPoolAnyValue_DoubleValue = sync.Pool{
+		New: func() any {
+			return &AnyValue_DoubleValue{}
+		},
+	}
+
+	ProtoPoolAnyValue_ArrayValue = sync.Pool{
+		New: func() any {
+			return &AnyValue_ArrayValue{}
+		},
+	}
+
+	ProtoPoolAnyValue_KvlistValue = sync.Pool{
+		New: func() any {
+			return &AnyValue_KvlistValue{}
+		},
+	}
+
+	ProtoPoolAnyValue_BytesValue = sync.Pool{
+		New: func() any {
+			return &AnyValue_BytesValue{}
+		},
+	}
+)
+
+func NewAnyValue() *AnyValue {
+	if !UseProtoPooling.IsEnabled() {
+		return &AnyValue{}
+	}
+	return protoPoolAnyValue.Get().(*AnyValue)
+}
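Note: the constructor above and the DeleteAnyValue that follows form a get/reset/put lifecycle around sync.Pool, gated by a feature flag (UseProtoPooling). A minimal sketch of the same pattern, where usePooling stands in for the feature gate and all names are illustrative:

package main

import (
	"fmt"
	"sync"
)

type anyValue struct{ Value any }

var pool = sync.Pool{New: func() any { return &anyValue{} }}

// usePooling stands in for the UseProtoPooling feature gate.
var usePooling = true

func newAnyValue() *anyValue {
	if !usePooling {
		return &anyValue{}
	}
	return pool.Get().(*anyValue)
}

func deleteAnyValue(v *anyValue) {
	// Always reset before returning to the pool so the next Get
	// never observes stale state.
	*v = anyValue{}
	if usePooling {
		pool.Put(v)
	}
}

func main() {
	v := newAnyValue()
	v.Value = "x"
	deleteAnyValue(v)
	fmt.Println(newAnyValue().Value) // <nil>: any reused object was reset
}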
DeleteArrayValue(ov.ArrayValue, true) + ov.ArrayValue = nil + ProtoPoolAnyValue_ArrayValue.Put(ov) + case *AnyValue_KvlistValue: + DeleteKeyValueList(ov.KvlistValue, true) + ov.KvlistValue = nil + ProtoPoolAnyValue_KvlistValue.Put(ov) + case *AnyValue_BytesValue: + if UseProtoPooling.IsEnabled() { + ov.BytesValue = nil + ProtoPoolAnyValue_BytesValue.Put(ov) + } + } + orig.Reset() + if nullable { + protoPoolAnyValue.Put(orig) + } +} + +func CopyAnyValue(dest, src *AnyValue) *AnyValue { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewAnyValue() + } + switch t := src.Value.(type) { + case *AnyValue_StringValue: + var ov *AnyValue_StringValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_StringValue{} + } else { + ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue) + } + ov.StringValue = t.StringValue + dest.Value = ov + + case *AnyValue_BoolValue: + var ov *AnyValue_BoolValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_BoolValue{} + } else { + ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue) + } + ov.BoolValue = t.BoolValue + dest.Value = ov + + case *AnyValue_IntValue: + var ov *AnyValue_IntValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_IntValue{} + } else { + ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue) + } + ov.IntValue = t.IntValue + dest.Value = ov + + case *AnyValue_DoubleValue: + var ov *AnyValue_DoubleValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_DoubleValue{} + } else { + ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue) + } + ov.DoubleValue = t.DoubleValue + dest.Value = ov + + case *AnyValue_ArrayValue: + var ov *AnyValue_ArrayValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_ArrayValue{} + } else { + ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue) + } + ov.ArrayValue = NewArrayValue() + CopyArrayValue(ov.ArrayValue, t.ArrayValue) + dest.Value = ov + + case *AnyValue_KvlistValue: + var ov *AnyValue_KvlistValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_KvlistValue{} + } else { + ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue) + } + ov.KvlistValue = NewKeyValueList() + CopyKeyValueList(ov.KvlistValue, t.KvlistValue) + dest.Value = ov + + case *AnyValue_BytesValue: + var ov *AnyValue_BytesValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_BytesValue{} + } else { + ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue) + } + ov.BytesValue = t.BytesValue + dest.Value = ov + + default: + dest.Value = nil + } + + return dest +} + +func CopyAnyValueSlice(dest, src []AnyValue) []AnyValue { + var newDest []AnyValue + if cap(dest) < len(src) { + newDest = make([]AnyValue, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteAnyValue(&dest[i], false) + } + } + for i := range src { + CopyAnyValue(&newDest[i], &src[i]) + } + return newDest +} + +func CopyAnyValuePtrSlice(dest, src []*AnyValue) []*AnyValue { + var newDest []*AnyValue + if cap(dest) < len(src) { + newDest = make([]*AnyValue, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). 
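+ // (indices len(dest) through len(src)-1); each new slot is filled from NewAnyValue, which may draw from the pool.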
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewAnyValue() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteAnyValue(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewAnyValue() + } + } + for i := range src { + CopyAnyValue(newDest[i], src[i]) + } + return newDest +} + +func (orig *AnyValue) Reset() { + *orig = AnyValue{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *AnyValue) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + switch orig := orig.Value.(type) { + case *AnyValue_StringValue: + dest.WriteObjectField("stringValue") + dest.WriteString(orig.StringValue) + case *AnyValue_BoolValue: + dest.WriteObjectField("boolValue") + dest.WriteBool(orig.BoolValue) + case *AnyValue_IntValue: + dest.WriteObjectField("intValue") + dest.WriteInt64(orig.IntValue) + case *AnyValue_DoubleValue: + dest.WriteObjectField("doubleValue") + dest.WriteFloat64(orig.DoubleValue) + case *AnyValue_ArrayValue: + if orig.ArrayValue != nil { + dest.WriteObjectField("arrayValue") + orig.ArrayValue.MarshalJSON(dest) + } + case *AnyValue_KvlistValue: + if orig.KvlistValue != nil { + dest.WriteObjectField("kvlistValue") + orig.KvlistValue.MarshalJSON(dest) + } + case *AnyValue_BytesValue: + + dest.WriteObjectField("bytesValue") + dest.WriteBytes(orig.BytesValue) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *AnyValue) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + + case "stringValue", "string_value": + { + var ov *AnyValue_StringValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_StringValue{} + } else { + ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue) + } + ov.StringValue = iter.ReadString() + orig.Value = ov + } + case "boolValue", "bool_value": + { + var ov *AnyValue_BoolValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_BoolValue{} + } else { + ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue) + } + ov.BoolValue = iter.ReadBool() + orig.Value = ov + } + case "intValue", "int_value": + { + var ov *AnyValue_IntValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_IntValue{} + } else { + ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue) + } + ov.IntValue = iter.ReadInt64() + orig.Value = ov + } + case "doubleValue", "double_value": + { + var ov *AnyValue_DoubleValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_DoubleValue{} + } else { + ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue) + } + ov.DoubleValue = iter.ReadFloat64() + orig.Value = ov + } + case "arrayValue", "array_value": + { + var ov *AnyValue_ArrayValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_ArrayValue{} + } else { + ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue) + } + ov.ArrayValue = NewArrayValue() + ov.ArrayValue.UnmarshalJSON(iter) + orig.Value = ov + } + case "kvlistValue", "kvlist_value": + { + var ov *AnyValue_KvlistValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_KvlistValue{} + } else { + ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue) + } + ov.KvlistValue = NewKeyValueList() + ov.KvlistValue.UnmarshalJSON(iter) + orig.Value = ov + } + case "bytesValue", "bytes_value": + { + var ov *AnyValue_BytesValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_BytesValue{} + } else { + ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue) + } + ov.BytesValue = iter.ReadBytes() + orig.Value = ov + } + + default: + iter.Skip() + } + } +} + +func (orig *AnyValue) SizeProto() int { + var n int + var l int + _ = l + switch orig := orig.Value.(type) { + case nil: + _ = orig + break + case *AnyValue_StringValue: + l = len(orig.StringValue) + n += 1 + proto.Sov(uint64(l)) + l + case *AnyValue_BoolValue: + + n += 2 + case *AnyValue_IntValue: + + n += 1 + proto.Sov(uint64(orig.IntValue)) + case *AnyValue_DoubleValue: + + n += 9 + case *AnyValue_ArrayValue: + if orig.ArrayValue != nil { + l = orig.ArrayValue.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *AnyValue_KvlistValue: + if orig.KvlistValue != nil { + l = orig.KvlistValue.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *AnyValue_BytesValue: + l = len(orig.BytesValue) + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *AnyValue) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + switch orig := orig.Value.(type) { + case *AnyValue_StringValue: + l = len(orig.StringValue) + pos -= l + copy(buf[pos:], orig.StringValue) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + case *AnyValue_BoolValue: + pos-- + if orig.BoolValue { + buf[pos] = 1 + } else { + buf[pos] = 0 + } + pos-- + buf[pos] = 0x10 + + case *AnyValue_IntValue: + pos = proto.EncodeVarint(buf, pos, uint64(orig.IntValue)) + pos-- + buf[pos] = 0x18 + + case *AnyValue_DoubleValue: + pos -= 8 + 
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.DoubleValue)) + pos-- + buf[pos] = 0x21 + + case *AnyValue_ArrayValue: + if orig.ArrayValue != nil { + l = orig.ArrayValue.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x2a + } + case *AnyValue_KvlistValue: + if orig.KvlistValue != nil { + l = orig.KvlistValue.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x32 + } + case *AnyValue_BytesValue: + l = len(orig.BytesValue) + pos -= l + copy(buf[pos:], orig.BytesValue) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x3a + + } + return len(buf) - pos +} + +func (orig *AnyValue) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *AnyValue_StringValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_StringValue{} + } else { + ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue) + } + ov.StringValue = string(buf[startPos:pos]) + orig.Value = ov + + case 2: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + var ov *AnyValue_BoolValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_BoolValue{} + } else { + ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue) + } + ov.BoolValue = num != 0 + orig.Value = ov + + case 3: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + var ov *AnyValue_IntValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_IntValue{} + } else { + ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue) + } + ov.IntValue = int64(num) + orig.Value = ov + + case 4: + if wireType != proto.WireTypeI64 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeI64(buf, pos) + if err != nil { + return err + } + var ov *AnyValue_DoubleValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_DoubleValue{} + } else { + ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue) + } + ov.DoubleValue = math.Float64frombits(num) + orig.Value = ov + + case 5: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *AnyValue_ArrayValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_ArrayValue{} + } else { + ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue) + } + ov.ArrayValue = NewArrayValue() + err = ov.ArrayValue.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.Value = ov + + case 6: + if wireType != proto.WireTypeLen { + 
return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *AnyValue_KvlistValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_KvlistValue{} + } else { + ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue) + } + ov.KvlistValue = NewKeyValueList() + err = ov.KvlistValue.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.Value = ov + + case 7: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *AnyValue_BytesValue + if !UseProtoPooling.IsEnabled() { + ov = &AnyValue_BytesValue{} + } else { + ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue) + } + if length != 0 { + ov.BytesValue = make([]byte, length) + copy(ov.BytesValue, buf[startPos:pos]) + } + orig.Value = ov + + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestAnyValue() *AnyValue { + orig := NewAnyValue() + orig.Value = &AnyValue_StringValue{StringValue: "test_stringvalue"} + return orig +} + +func GenTestAnyValuePtrSlice() []*AnyValue { + orig := make([]*AnyValue, 5) + orig[0] = NewAnyValue() + orig[1] = GenTestAnyValue() + orig[2] = NewAnyValue() + orig[3] = GenTestAnyValue() + orig[4] = NewAnyValue() + return orig +} + +func GenTestAnyValueSlice() []AnyValue { + orig := make([]AnyValue, 5) + orig[1] = *GenTestAnyValue() + orig[3] = *GenTestAnyValue() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go new file mode 100644 index 00000000000..f973831cf8e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message since oneof in AnyValue does not allow repeated fields. +type ArrayValue struct { + Values []AnyValue +} + +var ( + protoPoolArrayValue = sync.Pool{ + New: func() any { + return &ArrayValue{} + }, + } +) + +func NewArrayValue() *ArrayValue { + if !UseProtoPooling.IsEnabled() { + return &ArrayValue{} + } + return protoPoolArrayValue.Get().(*ArrayValue) +} + +func DeleteArrayValue(orig *ArrayValue, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.Values { + DeleteAnyValue(&orig.Values[i], false) + } + orig.Reset() + if nullable { + protoPoolArrayValue.Put(orig) + } +} + +func CopyArrayValue(dest, src *ArrayValue) *ArrayValue { + // If copying to same object, just return. 
+ if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewArrayValue() + } + dest.Values = CopyAnyValueSlice(dest.Values, src.Values) + + return dest +} + +func CopyArrayValueSlice(dest, src []ArrayValue) []ArrayValue { + var newDest []ArrayValue + if cap(dest) < len(src) { + newDest = make([]ArrayValue, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteArrayValue(&dest[i], false) + } + } + for i := range src { + CopyArrayValue(&newDest[i], &src[i]) + } + return newDest +} + +func CopyArrayValuePtrSlice(dest, src []*ArrayValue) []*ArrayValue { + var newDest []*ArrayValue + if cap(dest) < len(src) { + newDest = make([]*ArrayValue, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewArrayValue() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteArrayValue(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewArrayValue() + } + } + for i := range src { + CopyArrayValue(newDest[i], src[i]) + } + return newDest +} + +func (orig *ArrayValue) Reset() { + *orig = ArrayValue{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ArrayValue) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.Values) > 0 { + dest.WriteObjectField("values") + dest.WriteArrayStart() + orig.Values[0].MarshalJSON(dest) + for i := 1; i < len(orig.Values); i++ { + dest.WriteMore() + orig.Values[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ArrayValue) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "values": + for iter.ReadArray() { + orig.Values = append(orig.Values, AnyValue{}) + orig.Values[len(orig.Values)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *ArrayValue) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.Values { + l = orig.Values[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ArrayValue) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.Values) - 1; i >= 0; i-- { + l = orig.Values[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *ArrayValue) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
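+ // Each tag is a varint packing (field_number << 3) | wire_type; ConsumeTag decodes both parts and advances pos.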
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Values = append(orig.Values, AnyValue{}) + err = orig.Values[len(orig.Values)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestArrayValue() *ArrayValue { + orig := NewArrayValue() + orig.Values = []AnyValue{{}, *GenTestAnyValue()} + return orig +} + +func GenTestArrayValuePtrSlice() []*ArrayValue { + orig := make([]*ArrayValue, 5) + orig[0] = NewArrayValue() + orig[1] = GenTestArrayValue() + orig[2] = NewArrayValue() + orig[3] = GenTestArrayValue() + orig[4] = NewArrayValue() + return orig +} + +func GenTestArrayValueSlice() []ArrayValue { + orig := make([]ArrayValue, 5) + orig[1] = *GenTestArrayValue() + orig[3] = *GenTestArrayValue() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go new file mode 100644 index 00000000000..d326cfe46e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go @@ -0,0 +1,349 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +type EntityRef struct { + SchemaUrl string + Type string + IdKeys []string + DescriptionKeys []string +} + +var ( + protoPoolEntityRef = sync.Pool{ + New: func() any { + return &EntityRef{} + }, + } +) + +func NewEntityRef() *EntityRef { + if !UseProtoPooling.IsEnabled() { + return &EntityRef{} + } + return protoPoolEntityRef.Get().(*EntityRef) +} + +func DeleteEntityRef(orig *EntityRef, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + orig.Reset() + if nullable { + protoPoolEntityRef.Put(orig) + } +} + +func CopyEntityRef(dest, src *EntityRef) *EntityRef { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewEntityRef() + } + dest.SchemaUrl = src.SchemaUrl + dest.Type = src.Type + dest.IdKeys = append(dest.IdKeys[:0], src.IdKeys...) + + dest.DescriptionKeys = append(dest.DescriptionKeys[:0], src.DescriptionKeys...) + + return dest +} + +func CopyEntityRefSlice(dest, src []EntityRef) []EntityRef { + var newDest []EntityRef + if cap(dest) < len(src) { + newDest = make([]EntityRef, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
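+ // Deleting with nullable=false only resets each element in place; value elements live in the backing array and are never returned to the pool.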
+ for i := len(src); i < len(dest); i++ { + DeleteEntityRef(&dest[i], false) + } + } + for i := range src { + CopyEntityRef(&newDest[i], &src[i]) + } + return newDest +} + +func CopyEntityRefPtrSlice(dest, src []*EntityRef) []*EntityRef { + var newDest []*EntityRef + if cap(dest) < len(src) { + newDest = make([]*EntityRef, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewEntityRef() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteEntityRef(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewEntityRef() + } + } + for i := range src { + CopyEntityRef(newDest[i], src[i]) + } + return newDest +} + +func (orig *EntityRef) Reset() { + *orig = EntityRef{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *EntityRef) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + if orig.Type != "" { + dest.WriteObjectField("type") + dest.WriteString(orig.Type) + } + if len(orig.IdKeys) > 0 { + dest.WriteObjectField("idKeys") + dest.WriteArrayStart() + dest.WriteString(orig.IdKeys[0]) + for i := 1; i < len(orig.IdKeys); i++ { + dest.WriteMore() + dest.WriteString(orig.IdKeys[i]) + } + dest.WriteArrayEnd() + } + + if len(orig.DescriptionKeys) > 0 { + dest.WriteObjectField("descriptionKeys") + dest.WriteArrayStart() + dest.WriteString(orig.DescriptionKeys[0]) + for i := 1; i < len(orig.DescriptionKeys); i++ { + dest.WriteMore() + dest.WriteString(orig.DescriptionKeys[i]) + } + dest.WriteArrayEnd() + } + + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *EntityRef) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + case "type": + orig.Type = iter.ReadString() + case "idKeys", "id_keys": + for iter.ReadArray() { + orig.IdKeys = append(orig.IdKeys, iter.ReadString()) + } + + case "descriptionKeys", "description_keys": + for iter.ReadArray() { + orig.DescriptionKeys = append(orig.DescriptionKeys, iter.ReadString()) + } + + default: + iter.Skip() + } + } +} + +func (orig *EntityRef) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.Type) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + for _, s := range orig.IdKeys { + l = len(s) + n += 1 + proto.Sov(uint64(l)) + l + } + for _, s := range orig.DescriptionKeys { + l = len(s) + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *EntityRef) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + l = len(orig.Type) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Type) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + for i := len(orig.IdKeys) - 1; i >= 0; i-- { + l = len(orig.IdKeys[i]) + pos -= l + copy(buf[pos:], orig.IdKeys[i]) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + for i := len(orig.DescriptionKeys) - 1; i >= 0; i-- { + l = len(orig.DescriptionKeys[i]) + pos -= l + copy(buf[pos:], orig.DescriptionKeys[i]) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x22 + } + return len(buf) - pos +} + +func (orig *EntityRef) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
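+ // Fields may appear in any order and repeat; the switch below dispatches on the decoded field number and skips anything unknown.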
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Type = string(buf[startPos:pos]) + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.IdKeys = append(orig.IdKeys, string(buf[startPos:pos])) + + case 4: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DescriptionKeys = append(orig.DescriptionKeys, string(buf[startPos:pos])) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestEntityRef() *EntityRef { + orig := NewEntityRef() + orig.SchemaUrl = "test_schemaurl" + orig.Type = "test_type" + orig.IdKeys = []string{"", "test_idkeys"} + orig.DescriptionKeys = []string{"", "test_descriptionkeys"} + return orig +} + +func GenTestEntityRefPtrSlice() []*EntityRef { + orig := make([]*EntityRef, 5) + orig[0] = NewEntityRef() + orig[1] = GenTestEntityRef() + orig[2] = NewEntityRef() + orig[3] = GenTestEntityRef() + orig[4] = NewEntityRef() + return orig +} + +func GenTestEntityRefSlice() []EntityRef { + orig := make([]EntityRef, 5) + orig[1] = *GenTestEntityRef() + orig[3] = *GenTestEntityRef() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go new file mode 100644 index 00000000000..5516bfd201d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go @@ -0,0 +1,523 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "encoding/binary" + "fmt" + "math" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +func (m *Exemplar) GetValue() any { + if m != nil { + return m.Value + } + return nil +} + +type Exemplar_AsDouble struct { + AsDouble float64 +} + +func (m *Exemplar) GetAsDouble() float64 { + if v, ok := m.GetValue().(*Exemplar_AsDouble); ok { + return v.AsDouble + } + return float64(0) +} + +type Exemplar_AsInt struct { + AsInt int64 +} + +func (m *Exemplar) GetAsInt() int64 { + if v, ok := m.GetValue().(*Exemplar_AsInt); ok { + return v.AsInt + } + return int64(0) +} + +// Exemplar is a sample input double measurement. 
+// +// Exemplars also hold information about the environment when the measurement was recorded, +// for example the span and trace ID of the active span when the exemplar was recorded. +type Exemplar struct { + Value any + FilteredAttributes []KeyValue + TimeUnixNano uint64 + TraceId TraceID + SpanId SpanID +} + +var ( + protoPoolExemplar = sync.Pool{ + New: func() any { + return &Exemplar{} + }, + } + + ProtoPoolExemplar_AsDouble = sync.Pool{ + New: func() any { + return &Exemplar_AsDouble{} + }, + } + + ProtoPoolExemplar_AsInt = sync.Pool{ + New: func() any { + return &Exemplar_AsInt{} + }, + } +) + +func NewExemplar() *Exemplar { + if !UseProtoPooling.IsEnabled() { + return &Exemplar{} + } + return protoPoolExemplar.Get().(*Exemplar) +} + +func DeleteExemplar(orig *Exemplar, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.FilteredAttributes { + DeleteKeyValue(&orig.FilteredAttributes[i], false) + } + + switch ov := orig.Value.(type) { + case *Exemplar_AsDouble: + if UseProtoPooling.IsEnabled() { + ov.AsDouble = float64(0) + ProtoPoolExemplar_AsDouble.Put(ov) + } + case *Exemplar_AsInt: + if UseProtoPooling.IsEnabled() { + ov.AsInt = int64(0) + ProtoPoolExemplar_AsInt.Put(ov) + } + } + DeleteTraceID(&orig.TraceId, false) + DeleteSpanID(&orig.SpanId, false) + orig.Reset() + if nullable { + protoPoolExemplar.Put(orig) + } +} + +func CopyExemplar(dest, src *Exemplar) *Exemplar { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExemplar() + } + dest.FilteredAttributes = CopyKeyValueSlice(dest.FilteredAttributes, src.FilteredAttributes) + + dest.TimeUnixNano = src.TimeUnixNano + switch t := src.Value.(type) { + case *Exemplar_AsDouble: + var ov *Exemplar_AsDouble + if !UseProtoPooling.IsEnabled() { + ov = &Exemplar_AsDouble{} + } else { + ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble) + } + ov.AsDouble = t.AsDouble + dest.Value = ov + + case *Exemplar_AsInt: + var ov *Exemplar_AsInt + if !UseProtoPooling.IsEnabled() { + ov = &Exemplar_AsInt{} + } else { + ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt) + } + ov.AsInt = t.AsInt + dest.Value = ov + + default: + dest.Value = nil + } + CopyTraceID(&dest.TraceId, &src.TraceId) + + CopySpanID(&dest.SpanId, &src.SpanId) + + return dest +} + +func CopyExemplarSlice(dest, src []Exemplar) []Exemplar { + var newDest []Exemplar + if cap(dest) < len(src) { + newDest = make([]Exemplar, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExemplar(&dest[i], false) + } + } + for i := range src { + CopyExemplar(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExemplarPtrSlice(dest, src []*Exemplar) []*Exemplar { + var newDest []*Exemplar + if cap(dest) < len(src) { + newDest = make([]*Exemplar, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExemplar() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
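+ // Pointer elements are deleted with nullable=true so their structs can return to the pool, then nilled to drop the references.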
+ for i := len(src); i < len(dest); i++ { + DeleteExemplar(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExemplar() + } + } + for i := range src { + CopyExemplar(newDest[i], src[i]) + } + return newDest +} + +func (orig *Exemplar) Reset() { + *orig = Exemplar{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Exemplar) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.FilteredAttributes) > 0 { + dest.WriteObjectField("filteredAttributes") + dest.WriteArrayStart() + orig.FilteredAttributes[0].MarshalJSON(dest) + for i := 1; i < len(orig.FilteredAttributes); i++ { + dest.WriteMore() + orig.FilteredAttributes[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.TimeUnixNano != uint64(0) { + dest.WriteObjectField("timeUnixNano") + dest.WriteUint64(orig.TimeUnixNano) + } + switch orig := orig.Value.(type) { + case *Exemplar_AsDouble: + dest.WriteObjectField("asDouble") + dest.WriteFloat64(orig.AsDouble) + case *Exemplar_AsInt: + dest.WriteObjectField("asInt") + dest.WriteInt64(orig.AsInt) + } + if !orig.TraceId.IsEmpty() { + dest.WriteObjectField("traceId") + orig.TraceId.MarshalJSON(dest) + } + if !orig.SpanId.IsEmpty() { + dest.WriteObjectField("spanId") + orig.SpanId.MarshalJSON(dest) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Exemplar) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "filteredAttributes", "filtered_attributes": + for iter.ReadArray() { + orig.FilteredAttributes = append(orig.FilteredAttributes, KeyValue{}) + orig.FilteredAttributes[len(orig.FilteredAttributes)-1].UnmarshalJSON(iter) + } + + case "timeUnixNano", "time_unix_nano": + orig.TimeUnixNano = iter.ReadUint64() + + case "asDouble", "as_double": + { + var ov *Exemplar_AsDouble + if !UseProtoPooling.IsEnabled() { + ov = &Exemplar_AsDouble{} + } else { + ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble) + } + ov.AsDouble = iter.ReadFloat64() + orig.Value = ov + } + case "asInt", "as_int": + { + var ov *Exemplar_AsInt + if !UseProtoPooling.IsEnabled() { + ov = &Exemplar_AsInt{} + } else { + ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt) + } + ov.AsInt = iter.ReadInt64() + orig.Value = ov + } + + case "traceId", "trace_id": + + orig.TraceId.UnmarshalJSON(iter) + case "spanId", "span_id": + + orig.SpanId.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *Exemplar) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.FilteredAttributes { + l = orig.FilteredAttributes[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.TimeUnixNano != uint64(0) { + n += 9 + } + switch orig := orig.Value.(type) { + case nil: + _ = orig + break + case *Exemplar_AsDouble: + + n += 9 + case *Exemplar_AsInt: + + n += 9 + } + l = orig.TraceId.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + l = orig.SpanId.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *Exemplar) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.FilteredAttributes) - 1; i >= 0; i-- { + l = orig.FilteredAttributes[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x3a + } + if 
orig.TimeUnixNano != uint64(0) { + pos -= 8 + binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) + pos-- + buf[pos] = 0x11 + } + switch orig := orig.Value.(type) { + case *Exemplar_AsDouble: + pos -= 8 + binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble)) + pos-- + buf[pos] = 0x19 + + case *Exemplar_AsInt: + pos -= 8 + binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt)) + pos-- + buf[pos] = 0x31 + + } + l = orig.TraceId.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x2a + + l = orig.SpanId.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x22 + + return len(buf) - pos +} + +func (orig *Exemplar) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 7: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.FilteredAttributes = append(orig.FilteredAttributes, KeyValue{}) + err = orig.FilteredAttributes[len(orig.FilteredAttributes)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeI64 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeI64(buf, pos) + if err != nil { + return err + } + + orig.TimeUnixNano = uint64(num) + + case 3: + if wireType != proto.WireTypeI64 { + return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeI64(buf, pos) + if err != nil { + return err + } + var ov *Exemplar_AsDouble + if !UseProtoPooling.IsEnabled() { + ov = &Exemplar_AsDouble{} + } else { + ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble) + } + ov.AsDouble = math.Float64frombits(num) + orig.Value = ov + + case 6: + if wireType != proto.WireTypeI64 { + return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeI64(buf, pos) + if err != nil { + return err + } + var ov *Exemplar_AsInt + if !UseProtoPooling.IsEnabled() { + ov = &Exemplar_AsInt{} + } else { + ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt) + } + ov.AsInt = int64(num) + orig.Value = ov + + case 5: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.TraceId.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 4: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.SpanId.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExemplar() 
*Exemplar { + orig := NewExemplar() + orig.FilteredAttributes = []KeyValue{{}, *GenTestKeyValue()} + orig.TimeUnixNano = uint64(13) + orig.Value = &Exemplar_AsDouble{AsDouble: float64(3.1415926)} + orig.TraceId = *GenTestTraceID() + orig.SpanId = *GenTestSpanID() + return orig +} + +func GenTestExemplarPtrSlice() []*Exemplar { + orig := make([]*Exemplar, 5) + orig[0] = NewExemplar() + orig[1] = GenTestExemplar() + orig[2] = NewExemplar() + orig[3] = GenTestExemplar() + orig[4] = NewExemplar() + return orig +} + +func GenTestExemplarSlice() []Exemplar { + orig := make([]Exemplar, 5) + orig[1] = *GenTestExemplar() + orig[3] = *GenTestExemplar() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go new file mode 100644 index 00000000000..5d5e07793d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go @@ -0,0 +1,275 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExponentialHistogram represents the type of a metric that is calculated by aggregating +// as a ExponentialHistogram of all reported double measurements over a time interval. +type ExponentialHistogram struct { + DataPoints []*ExponentialHistogramDataPoint + AggregationTemporality AggregationTemporality +} + +var ( + protoPoolExponentialHistogram = sync.Pool{ + New: func() any { + return &ExponentialHistogram{} + }, + } +) + +func NewExponentialHistogram() *ExponentialHistogram { + if !UseProtoPooling.IsEnabled() { + return &ExponentialHistogram{} + } + return protoPoolExponentialHistogram.Get().(*ExponentialHistogram) +} + +func DeleteExponentialHistogram(orig *ExponentialHistogram, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.DataPoints { + DeleteExponentialHistogramDataPoint(orig.DataPoints[i], true) + } + + orig.Reset() + if nullable { + protoPoolExponentialHistogram.Put(orig) + } +} + +func CopyExponentialHistogram(dest, src *ExponentialHistogram) *ExponentialHistogram { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExponentialHistogram() + } + dest.DataPoints = CopyExponentialHistogramDataPointPtrSlice(dest.DataPoints, src.DataPoints) + + dest.AggregationTemporality = src.AggregationTemporality + + return dest +} + +func CopyExponentialHistogramSlice(dest, src []ExponentialHistogram) []ExponentialHistogram { + var newDest []ExponentialHistogram + if cap(dest) < len(src) { + newDest = make([]ExponentialHistogram, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
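+ // Resetting the tail drops each histogram's DataPoints so the GC (or the pool, when pooling is enabled) can reclaim them.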
+ for i := len(src); i < len(dest); i++ { + DeleteExponentialHistogram(&dest[i], false) + } + } + for i := range src { + CopyExponentialHistogram(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExponentialHistogramPtrSlice(dest, src []*ExponentialHistogram) []*ExponentialHistogram { + var newDest []*ExponentialHistogram + if cap(dest) < len(src) { + newDest = make([]*ExponentialHistogram, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExponentialHistogram() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExponentialHistogram(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExponentialHistogram() + } + } + for i := range src { + CopyExponentialHistogram(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExponentialHistogram) Reset() { + *orig = ExponentialHistogram{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExponentialHistogram) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.DataPoints) > 0 { + dest.WriteObjectField("dataPoints") + dest.WriteArrayStart() + orig.DataPoints[0].MarshalJSON(dest) + for i := 1; i < len(orig.DataPoints); i++ { + dest.WriteMore() + orig.DataPoints[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + + if int32(orig.AggregationTemporality) != 0 { + dest.WriteObjectField("aggregationTemporality") + dest.WriteInt32(int32(orig.AggregationTemporality)) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *ExponentialHistogram) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "dataPoints", "data_points": + for iter.ReadArray() { + orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint()) + orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter) + } + + case "aggregationTemporality", "aggregation_temporality": + orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value)) + default: + iter.Skip() + } + } +} + +func (orig *ExponentialHistogram) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.DataPoints { + l = orig.DataPoints[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.AggregationTemporality != AggregationTemporality(0) { + n += 1 + proto.Sov(uint64(orig.AggregationTemporality)) + } + return n +} + +func (orig *ExponentialHistogram) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.DataPoints) - 1; i >= 0; i-- { + l = orig.DataPoints[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + if orig.AggregationTemporality != AggregationTemporality(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality)) + pos-- + buf[pos] = 0x10 + } + return len(buf) - pos +} + +func (orig *ExponentialHistogram) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint()) + err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.AggregationTemporality = AggregationTemporality(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExponentialHistogram() *ExponentialHistogram { + orig := NewExponentialHistogram() + orig.DataPoints = []*ExponentialHistogramDataPoint{{}, GenTestExponentialHistogramDataPoint()} + orig.AggregationTemporality = AggregationTemporality(13) + return orig +} + +func GenTestExponentialHistogramPtrSlice() []*ExponentialHistogram { + orig := make([]*ExponentialHistogram, 5) + orig[0] = NewExponentialHistogram() + orig[1] = GenTestExponentialHistogram() + orig[2] = NewExponentialHistogram() + orig[3] = GenTestExponentialHistogram() + orig[4] = NewExponentialHistogram() + return orig +} + +func GenTestExponentialHistogramSlice() []ExponentialHistogram { + orig := make([]ExponentialHistogram, 5) + orig[1] = *GenTestExponentialHistogram() + orig[3] = *GenTestExponentialHistogram() + return orig +} diff --git 
a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go similarity index 50% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go index 29bc0011c91..e0407b073e0 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go @@ -12,45 +12,48 @@ import ( "math" "sync" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +type ExponentialHistogramDataPoint struct { + Positive ExponentialHistogramDataPointBuckets + Negative ExponentialHistogramDataPointBuckets + Attributes []KeyValue + Exemplars []Exemplar + StartTimeUnixNano uint64 + TimeUnixNano uint64 + Count uint64 + Sum float64 + ZeroCount uint64 + Min float64 + Max float64 + ZeroThreshold float64 + metadata [1]uint64 + Scale int32 + Flags uint32 +} + var ( protoPoolExponentialHistogramDataPoint = sync.Pool{ New: func() any { - return &otlpmetrics.ExponentialHistogramDataPoint{} - }, - } - ProtoPoolExponentialHistogramDataPoint_Sum = sync.Pool{ - New: func() any { - return &otlpmetrics.ExponentialHistogramDataPoint_Sum{} - }, - } - - ProtoPoolExponentialHistogramDataPoint_Min = sync.Pool{ - New: func() any { - return &otlpmetrics.ExponentialHistogramDataPoint_Min{} - }, - } - - ProtoPoolExponentialHistogramDataPoint_Max = sync.Pool{ - New: func() any { - return &otlpmetrics.ExponentialHistogramDataPoint_Max{} + return &ExponentialHistogramDataPoint{} }, } ) -func NewOrigExponentialHistogramDataPoint() *otlpmetrics.ExponentialHistogramDataPoint { +func NewExponentialHistogramDataPoint() *ExponentialHistogramDataPoint { if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.ExponentialHistogramDataPoint{} + return &ExponentialHistogramDataPoint{} } - return protoPoolExponentialHistogramDataPoint.Get().(*otlpmetrics.ExponentialHistogramDataPoint) + return protoPoolExponentialHistogramDataPoint.Get().(*ExponentialHistogramDataPoint) } -func DeleteOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, nullable bool) { +func DeleteExponentialHistogramDataPoint(orig *ExponentialHistogramDataPoint, nullable bool) { if orig == nil { return } @@ -59,38 +62,15 @@ func DeleteOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistog orig.Reset() return } - for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) - } - switch ov := orig.Sum_.(type) { - case *otlpmetrics.ExponentialHistogramDataPoint_Sum: - if UseProtoPooling.IsEnabled() { - ov.Sum = float64(0) - ProtoPoolExponentialHistogramDataPoint_Sum.Put(ov) - } - - } - 
DeleteOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, false) - DeleteOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, false) - for i := range orig.Exemplars { - DeleteOrigExemplar(&orig.Exemplars[i], false) + DeleteKeyValue(&orig.Attributes[i], false) } - switch ov := orig.Min_.(type) { - case *otlpmetrics.ExponentialHistogramDataPoint_Min: - if UseProtoPooling.IsEnabled() { - ov.Min = float64(0) - ProtoPoolExponentialHistogramDataPoint_Min.Put(ov) - } - } - switch ov := orig.Max_.(type) { - case *otlpmetrics.ExponentialHistogramDataPoint_Max: - if UseProtoPooling.IsEnabled() { - ov.Max = float64(0) - ProtoPoolExponentialHistogramDataPoint_Max.Put(ov) - } + DeleteExponentialHistogramDataPointBuckets(&orig.Positive, false) + DeleteExponentialHistogramDataPointBuckets(&orig.Negative, false) + for i := range orig.Exemplars { + DeleteExemplar(&orig.Exemplars[i], false) } orig.Reset() @@ -99,83 +79,118 @@ func DeleteOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistog } } -func CopyOrigExponentialHistogramDataPoint(dest, src *otlpmetrics.ExponentialHistogramDataPoint) { +func CopyExponentialHistogramDataPoint(dest, src *ExponentialHistogramDataPoint) *ExponentialHistogramDataPoint { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil } - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) + + if dest == nil { + dest = NewExponentialHistogramDataPoint() + } + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + dest.StartTimeUnixNano = src.StartTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano dest.Count = src.Count - if srcSum, ok := src.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok { - destSum, ok := dest.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum) - if !ok { - destSum = &otlpmetrics.ExponentialHistogramDataPoint_Sum{} - dest.Sum_ = destSum - } - destSum.Sum = srcSum.Sum + if src.HasSum() { + dest.SetSum(src.Sum) } else { - dest.Sum_ = nil + dest.RemoveSum() } + dest.Scale = src.Scale dest.ZeroCount = src.ZeroCount - CopyOrigExponentialHistogramDataPoint_Buckets(&dest.Positive, &src.Positive) - CopyOrigExponentialHistogramDataPoint_Buckets(&dest.Negative, &src.Negative) + CopyExponentialHistogramDataPointBuckets(&dest.Positive, &src.Positive) + + CopyExponentialHistogramDataPointBuckets(&dest.Negative, &src.Negative) + dest.Flags = src.Flags - dest.Exemplars = CopyOrigExemplarSlice(dest.Exemplars, src.Exemplars) - if srcMin, ok := src.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok { - destMin, ok := dest.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min) - if !ok { - destMin = &otlpmetrics.ExponentialHistogramDataPoint_Min{} - dest.Min_ = destMin - } - destMin.Min = srcMin.Min + dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars) + + if src.HasMin() { + dest.SetMin(src.Min) } else { - dest.Min_ = nil + dest.RemoveMin() } - if srcMax, ok := src.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok { - destMax, ok := dest.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max) - if !ok { - destMax = &otlpmetrics.ExponentialHistogramDataPoint_Max{} - dest.Max_ = destMax - } - destMax.Max = srcMax.Max + + if src.HasMax() { + dest.SetMax(src.Max) } else { - dest.Max_ = nil + dest.RemoveMax() } + dest.ZeroThreshold = src.ZeroThreshold + + return dest } -func GenTestOrigExponentialHistogramDataPoint() *otlpmetrics.ExponentialHistogramDataPoint { - orig := NewOrigExponentialHistogramDataPoint() - 
orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.StartTimeUnixNano = 1234567890 - orig.TimeUnixNano = 1234567890 - orig.Count = uint64(13) - orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: float64(3.1415926)} - orig.Scale = int32(13) - orig.ZeroCount = uint64(13) - orig.Positive = *GenTestOrigExponentialHistogramDataPoint_Buckets() - orig.Negative = *GenTestOrigExponentialHistogramDataPoint_Buckets() - orig.Flags = 1 - orig.Exemplars = GenerateOrigTestExemplarSlice() - orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: float64(3.1415926)} - orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: float64(3.1415926)} - orig.ZeroThreshold = float64(3.1415926) - return orig +func CopyExponentialHistogramDataPointSlice(dest, src []ExponentialHistogramDataPoint) []ExponentialHistogramDataPoint { + var newDest []ExponentialHistogramDataPoint + if cap(dest) < len(src) { + newDest = make([]ExponentialHistogramDataPoint, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExponentialHistogramDataPoint(&dest[i], false) + } + } + for i := range src { + CopyExponentialHistogramDataPoint(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExponentialHistogramDataPointPtrSlice(dest, src []*ExponentialHistogramDataPoint) []*ExponentialHistogramDataPoint { + var newDest []*ExponentialHistogramDataPoint + if cap(dest) < len(src) { + newDest = make([]*ExponentialHistogramDataPoint, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExponentialHistogramDataPoint() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExponentialHistogramDataPoint(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExponentialHistogramDataPoint() + } + } + for i := range src { + CopyExponentialHistogramDataPoint(newDest[i], src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, dest *json.Stream) { +func (orig *ExponentialHistogramDataPoint) Reset() { + *orig = ExponentialHistogramDataPoint{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ExponentialHistogramDataPoint) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if len(orig.Attributes) > 0 { dest.WriteObjectField("attributes") dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Attributes[0], dest) + orig.Attributes[0].MarshalJSON(dest) for i := 1; i < len(orig.Attributes); i++ { dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Attributes[i], dest) + orig.Attributes[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -191,7 +206,7 @@ func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialH dest.WriteObjectField("count") dest.WriteUint64(orig.Count) } - if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok { + if orig.HasSum() { dest.WriteObjectField("sum") dest.WriteFloat64(orig.Sum) } @@ -204,9 +219,9 @@ func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialH dest.WriteUint64(orig.ZeroCount) } dest.WriteObjectField("positive") - MarshalJSONOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, dest) + orig.Positive.MarshalJSON(dest) dest.WriteObjectField("negative") - MarshalJSONOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, dest) + orig.Negative.MarshalJSON(dest) if orig.Flags != uint32(0) { dest.WriteObjectField("flags") dest.WriteUint32(orig.Flags) @@ -214,18 +229,18 @@ func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialH if len(orig.Exemplars) > 0 { dest.WriteObjectField("exemplars") dest.WriteArrayStart() - MarshalJSONOrigExemplar(&orig.Exemplars[0], dest) + orig.Exemplars[0].MarshalJSON(dest) for i := 1; i < len(orig.Exemplars); i++ { dest.WriteMore() - MarshalJSONOrigExemplar(&orig.Exemplars[i], dest) + orig.Exemplars[i].MarshalJSON(dest) } dest.WriteArrayEnd() } - if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok { + if orig.HasMin() { dest.WriteObjectField("min") dest.WriteFloat64(orig.Min) } - if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok { + if orig.HasMax() { dest.WriteObjectField("max") dest.WriteFloat64(orig.Max) } @@ -236,14 +251,14 @@ func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialH dest.WriteObjectEnd() } -// UnmarshalJSONOrigExponentialHistogramDataPoint unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
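// Editor's note: an illustrative sketch, not part of the vendored diff. The
// MarshalJSON hunk above streams arrays as "first element, then separator
// plus element" (dest.WriteMore), which avoids a trailing-comma check on
// every iteration. The same pattern with a plain strings.Builder:
package main

import (
	"fmt"
	"strings"
)

func writeArray(b *strings.Builder, vals []int) {
	b.WriteByte('[')
	if len(vals) > 0 {
		fmt.Fprintf(b, "%d", vals[0])
		for i := 1; i < len(vals); i++ {
			b.WriteByte(',') // the WriteMore step
			fmt.Fprintf(b, "%d", vals[i])
		}
	}
	b.WriteByte(']')
}

func main() {
	var b strings.Builder
	writeArray(&b, []int{1, 2, 3})
	fmt.Println(b.String()) // [1,2,3]
}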
+func (orig *ExponentialHistogramDataPoint) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "attributes": for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) } case "startTimeUnixNano", "start_time_unix_nano": @@ -253,56 +268,31 @@ func UnmarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponentia case "count": orig.Count = iter.ReadUint64() case "sum": - { - var ov *otlpmetrics.ExponentialHistogramDataPoint_Sum - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.ExponentialHistogramDataPoint_Sum{} - } else { - ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Sum) - } - ov.Sum = iter.ReadFloat64() - orig.Sum_ = ov - } + orig.SetSum(iter.ReadFloat64()) case "scale": orig.Scale = iter.ReadInt32() case "zeroCount", "zero_count": orig.ZeroCount = iter.ReadUint64() case "positive": - UnmarshalJSONOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, iter) + + orig.Positive.UnmarshalJSON(iter) case "negative": - UnmarshalJSONOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, iter) + + orig.Negative.UnmarshalJSON(iter) case "flags": orig.Flags = iter.ReadUint32() case "exemplars": for iter.ReadArray() { - orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{}) - UnmarshalJSONOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter) + orig.Exemplars = append(orig.Exemplars, Exemplar{}) + orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter) } case "min": - { - var ov *otlpmetrics.ExponentialHistogramDataPoint_Min - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.ExponentialHistogramDataPoint_Min{} - } else { - ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Min) - } - ov.Min = iter.ReadFloat64() - orig.Min_ = ov - } + orig.SetMin(iter.ReadFloat64()) case "max": - { - var ov *otlpmetrics.ExponentialHistogramDataPoint_Max - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.ExponentialHistogramDataPoint_Max{} - } else { - ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Max) - } - ov.Max = iter.ReadFloat64() - orig.Max_ = ov - } + orig.SetMax(iter.ReadFloat64()) case "zeroThreshold", "zero_threshold": orig.ZeroThreshold = iter.ReadFloat64() @@ -312,142 +302,138 @@ func UnmarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponentia } } -func SizeProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint) int { +func (orig *ExponentialHistogramDataPoint) SizeProto() int { var n int var l int _ = l for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) + l = orig.Attributes[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { n += 9 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { n += 9 } - if orig.Count != 0 { + if orig.Count != uint64(0) { n += 9 } - if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok { - _ = orig + if orig.HasSum() { n += 9 } - if orig.Scale != 0 { + if orig.Scale != int32(0) { n += 1 + proto.Soz(uint64(orig.Scale)) } - if orig.ZeroCount != 0 { + if orig.ZeroCount != uint64(0) { n += 9 } - l = 
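// Editor's note: an illustrative sketch, not part of the vendored diff. The
// UnmarshalJSON hunk above accepts both the lowerCamelCase spelling and the
// original snake_case proto field name for each key ("zeroCount" /
// "zero_count"), and skips anything else. A minimal decoder loop with the
// same dual-case switch, using only encoding/json:
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var raw map[string]json.RawMessage
	_ = json.Unmarshal([]byte(`{"zero_count": 7, "unknown": true}`), &raw)
	var zeroCount uint64
	for k, v := range raw {
		switch k {
		case "zeroCount", "zero_count": // both spellings map to one field
			_ = json.Unmarshal(v, &zeroCount)
		default:
			// ignored, like iter.Skip() above
		}
	}
	fmt.Println(zeroCount) // 7
}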
SizeProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Positive) + l = orig.Positive.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - l = SizeProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Negative) + l = orig.Negative.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - if orig.Flags != 0 { + if orig.Flags != uint32(0) { n += 1 + proto.Sov(uint64(orig.Flags)) } for i := range orig.Exemplars { - l = SizeProtoOrigExemplar(&orig.Exemplars[i]) + l = orig.Exemplars[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok { - _ = orig + if orig.HasMin() { n += 9 } - if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok { - _ = orig + if orig.HasMax() { n += 9 } - if orig.ZeroThreshold != 0 { + if orig.ZeroThreshold != float64(0) { n += 9 } return n } -func MarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, buf []byte) int { +func (orig *ExponentialHistogramDataPoint) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) + l = orig.Attributes[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0xa } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano)) pos-- buf[pos] = 0x11 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) pos-- buf[pos] = 0x19 } - if orig.Count != 0 { + if orig.Count != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count)) pos-- buf[pos] = 0x21 } - if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok { + if orig.HasSum() { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum)) pos-- buf[pos] = 0x29 } - if orig.Scale != 0 { + if orig.Scale != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Scale)<<1)^uint32(orig.Scale>>31))) pos-- buf[pos] = 0x30 } - if orig.ZeroCount != 0 { + if orig.ZeroCount != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.ZeroCount)) pos-- buf[pos] = 0x39 } - - l = MarshalProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, buf[:pos]) + l = orig.Positive.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x42 - l = MarshalProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, buf[:pos]) + l = orig.Negative.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x4a - if orig.Flags != 0 { + if orig.Flags != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags)) pos-- buf[pos] = 0x50 } for i := len(orig.Exemplars) - 1; i >= 0; i-- { - l = MarshalProtoOrigExemplar(&orig.Exemplars[i], buf[:pos]) + l = orig.Exemplars[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x5a } - if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok { + if orig.HasMin() { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min)) pos-- buf[pos] = 0x61 } - if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok { + if orig.HasMax() { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max)) pos-- buf[pos] = 0x69 } - 
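// Editor's note: an illustrative sketch, not part of the vendored diff. Scale
// is a sint32, so the MarshalProto hunk above zigzag-encodes it before varint
// encoding: values near zero of either sign map to small unsigned numbers
// (0->0, -1->1, 1->2, -2->3, ...). A round trip of exactly the expressions
// used in the marshal above and the unmarshal further below:
package main

import "fmt"

func zigzag(v int32) uint64   { return uint64((uint32(v) << 1) ^ uint32(v>>31)) }
func unzigzag(u uint64) int32 { return int32(uint32(u>>1) ^ uint32(int32((u&1)<<31)>>31)) }

func main() {
	for _, v := range []int32{0, -1, 1, -2, 13, -13} {
		u := zigzag(v)
		fmt.Println(v, "->", u, "->", unzigzag(u)) // every value survives the trip
	}
}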
if orig.ZeroThreshold != 0 { + if orig.ZeroThreshold != float64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.ZeroThreshold)) pos-- @@ -456,7 +442,7 @@ func MarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponential return len(buf) - pos } -func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, buf []byte) error { +func (orig *ExponentialHistogramDataPoint) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -481,8 +467,8 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti return err } startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -532,14 +518,7 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti if err != nil { return err } - var ov *otlpmetrics.ExponentialHistogramDataPoint_Sum - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.ExponentialHistogramDataPoint_Sum{} - } else { - ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Sum) - } - ov.Sum = math.Float64frombits(num) - orig.Sum_ = ov + orig.SetSum(math.Float64frombits(num)) case 6: if wireType != proto.WireTypeVarint { @@ -550,7 +529,6 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti if err != nil { return err } - orig.Scale = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31)) case 7: @@ -576,7 +554,7 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti } startPos := pos - length - err = UnmarshalProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, buf[startPos:pos]) + err = orig.Positive.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -592,7 +570,7 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti } startPos := pos - length - err = UnmarshalProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, buf[startPos:pos]) + err = orig.Negative.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -606,7 +584,6 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti if err != nil { return err } - orig.Flags = uint32(num) case 11: @@ -619,8 +596,8 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti return err } startPos := pos - length - orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{}) - err = UnmarshalProtoOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos]) + orig.Exemplars = append(orig.Exemplars, Exemplar{}) + err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -634,14 +611,7 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti if err != nil { return err } - var ov *otlpmetrics.ExponentialHistogramDataPoint_Min - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.ExponentialHistogramDataPoint_Min{} - } else { - ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Min) - } - ov.Min = math.Float64frombits(num) - orig.Min_ = ov + orig.SetMin(math.Float64frombits(num)) case 13: if wireType != 
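// Editor's note: an illustrative sketch, not part of the vendored diff. The
// MarshalProto above fills the buffer back to front: payload first, then its
// length varint, then the one-byte tag, each step moving pos left. Writing
// backwards means nested message lengths are known before their prefix is
// written. A standalone helper using the same end-relative pos convention:
package main

import (
	"encoding/binary"
	"fmt"
)

// prependUvarint writes v's varint ending just before buf[pos] and returns
// the new, smaller pos: the backward analogue of proto.EncodeVarint above.
func prependUvarint(buf []byte, pos int, v uint64) int {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], v)
	pos -= n
	copy(buf[pos:], tmp[:n])
	return pos
}

func main() {
	buf := make([]byte, 16)
	pos := len(buf)
	payload := []byte("hi")
	pos -= len(payload)
	copy(buf[pos:], payload) // 1. payload
	pos = prependUvarint(buf, pos, uint64(len(payload))) // 2. its length
	pos--
	buf[pos] = 0x12 // 3. tag: field 2, wire type LEN
	fmt.Printf("% x\n", buf[pos:]) // 12 02 68 69
}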
proto.WireTypeI64 { @@ -652,14 +622,7 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti if err != nil { return err } - var ov *otlpmetrics.ExponentialHistogramDataPoint_Max - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.ExponentialHistogramDataPoint_Max{} - } else { - ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Max) - } - ov.Max = math.Float64frombits(num) - orig.Max_ = ov + orig.SetMax(math.Float64frombits(num)) case 14: if wireType != proto.WireTypeI64 { @@ -670,7 +633,6 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti if err != nil { return err } - orig.ZeroThreshold = math.Float64frombits(num) default: pos, err = proto.ConsumeUnknown(buf, pos, wireType) @@ -681,3 +643,90 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti } return nil } + +const fieldBlockExponentialHistogramDataPointSum = uint64(0 >> 6) +const fieldBitExponentialHistogramDataPointSum = uint64(1 << 0 & 0x3F) + +func (m *ExponentialHistogramDataPoint) SetSum(value float64) { + m.Sum = value + m.metadata[fieldBlockExponentialHistogramDataPointSum] |= fieldBitExponentialHistogramDataPointSum +} + +func (m *ExponentialHistogramDataPoint) RemoveSum() { + m.Sum = float64(0) + m.metadata[fieldBlockExponentialHistogramDataPointSum] &^= fieldBitExponentialHistogramDataPointSum +} + +func (m *ExponentialHistogramDataPoint) HasSum() bool { + return m.metadata[fieldBlockExponentialHistogramDataPointSum]&fieldBitExponentialHistogramDataPointSum != 0 +} + +const fieldBlockExponentialHistogramDataPointMin = uint64(1 >> 6) +const fieldBitExponentialHistogramDataPointMin = uint64(1 << 1 & 0x3F) + +func (m *ExponentialHistogramDataPoint) SetMin(value float64) { + m.Min = value + m.metadata[fieldBlockExponentialHistogramDataPointMin] |= fieldBitExponentialHistogramDataPointMin +} + +func (m *ExponentialHistogramDataPoint) RemoveMin() { + m.Min = float64(0) + m.metadata[fieldBlockExponentialHistogramDataPointMin] &^= fieldBitExponentialHistogramDataPointMin +} + +func (m *ExponentialHistogramDataPoint) HasMin() bool { + return m.metadata[fieldBlockExponentialHistogramDataPointMin]&fieldBitExponentialHistogramDataPointMin != 0 +} + +const fieldBlockExponentialHistogramDataPointMax = uint64(2 >> 6) +const fieldBitExponentialHistogramDataPointMax = uint64(1 << 2 & 0x3F) + +func (m *ExponentialHistogramDataPoint) SetMax(value float64) { + m.Max = value + m.metadata[fieldBlockExponentialHistogramDataPointMax] |= fieldBitExponentialHistogramDataPointMax +} + +func (m *ExponentialHistogramDataPoint) RemoveMax() { + m.Max = float64(0) + m.metadata[fieldBlockExponentialHistogramDataPointMax] &^= fieldBitExponentialHistogramDataPointMax +} + +func (m *ExponentialHistogramDataPoint) HasMax() bool { + return m.metadata[fieldBlockExponentialHistogramDataPointMax]&fieldBitExponentialHistogramDataPointMax != 0 +} + +func GenTestExponentialHistogramDataPoint() *ExponentialHistogramDataPoint { + orig := NewExponentialHistogramDataPoint() + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.StartTimeUnixNano = uint64(13) + orig.TimeUnixNano = uint64(13) + orig.Count = uint64(13) + orig.SetSum(float64(3.1415926)) + orig.Scale = int32(13) + orig.ZeroCount = uint64(13) + orig.Positive = *GenTestExponentialHistogramDataPointBuckets() + orig.Negative = *GenTestExponentialHistogramDataPointBuckets() + orig.Flags = uint32(13) + orig.Exemplars = []Exemplar{{}, *GenTestExemplar()} + 
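// Editor's note: an illustrative sketch, not part of the vendored diff. The
// fieldBlock/fieldBit constants above index a small bitset kept in the
// struct's metadata array: optional field i lives in word i>>6, at bit
// i&0x3F, i.e. 64 presence flags per uint64 word. A generic presence set with
// the same layout (names invented for illustration):
package main

import "fmt"

type presence [2]uint64 // room for 128 optional fields

func (p *presence) set(i uint)      { p[i>>6] |= 1 << (i & 0x3F) }
func (p *presence) clear(i uint)    { p[i>>6] &^= 1 << (i & 0x3F) }
func (p *presence) has(i uint) bool { return p[i>>6]&(1<<(i&0x3F)) != 0 }

func main() {
	var p presence
	p.set(1) // e.g. Min above is field index 1: block 0, bit 1
	fmt.Println(p.has(1), p.has(2)) // true false
	p.clear(1) // the Remove* analogue, using &^= as above
	fmt.Println(p.has(1)) // false
}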
orig.SetMin(float64(3.1415926)) + orig.SetMax(float64(3.1415926)) + orig.ZeroThreshold = float64(3.1415926) + return orig +} + +func GenTestExponentialHistogramDataPointPtrSlice() []*ExponentialHistogramDataPoint { + orig := make([]*ExponentialHistogramDataPoint, 5) + orig[0] = NewExponentialHistogramDataPoint() + orig[1] = GenTestExponentialHistogramDataPoint() + orig[2] = NewExponentialHistogramDataPoint() + orig[3] = GenTestExponentialHistogramDataPoint() + orig[4] = NewExponentialHistogramDataPoint() + return orig +} + +func GenTestExponentialHistogramDataPointSlice() []ExponentialHistogramDataPoint { + orig := make([]ExponentialHistogramDataPoint, 5) + orig[1] = *GenTestExponentialHistogramDataPoint() + orig[3] = *GenTestExponentialHistogramDataPoint() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go new file mode 100644 index 00000000000..15243f12dd4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go @@ -0,0 +1,290 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExponentialHistogramDataPointBuckets are a set of bucket counts, encoded in a contiguous array of counts. +type ExponentialHistogramDataPointBuckets struct { + BucketCounts []uint64 + Offset int32 +} + +var ( + protoPoolExponentialHistogramDataPointBuckets = sync.Pool{ + New: func() any { + return &ExponentialHistogramDataPointBuckets{} + }, + } +) + +func NewExponentialHistogramDataPointBuckets() *ExponentialHistogramDataPointBuckets { + if !UseProtoPooling.IsEnabled() { + return &ExponentialHistogramDataPointBuckets{} + } + return protoPoolExponentialHistogramDataPointBuckets.Get().(*ExponentialHistogramDataPointBuckets) +} + +func DeleteExponentialHistogramDataPointBuckets(orig *ExponentialHistogramDataPointBuckets, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + orig.Reset() + if nullable { + protoPoolExponentialHistogramDataPointBuckets.Put(orig) + } +} + +func CopyExponentialHistogramDataPointBuckets(dest, src *ExponentialHistogramDataPointBuckets) *ExponentialHistogramDataPointBuckets { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExponentialHistogramDataPointBuckets() + } + dest.Offset = src.Offset + dest.BucketCounts = append(dest.BucketCounts[:0], src.BucketCounts...) + + return dest +} + +func CopyExponentialHistogramDataPointBucketsSlice(dest, src []ExponentialHistogramDataPointBuckets) []ExponentialHistogramDataPointBuckets { + var newDest []ExponentialHistogramDataPointBuckets + if cap(dest) < len(src) { + newDest = make([]ExponentialHistogramDataPointBuckets, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
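// Editor's note: an illustrative sketch, not part of the vendored diff. The
// New/Delete pair above allocates from a sync.Pool only while the
// UseProtoPooling gate is enabled, and Reset()s an object before Put so no
// stale data reaches the next borrower. The same shape with a plain bool
// standing in for the feature gate (names invented for illustration):
package main

import (
	"fmt"
	"sync"
)

var poolingEnabled = true

type buckets struct {
	BucketCounts []uint64
	Offset       int32
}

var bucketsPool = sync.Pool{New: func() any { return &buckets{} }}

func newBuckets() *buckets {
	if !poolingEnabled {
		return &buckets{}
	}
	return bucketsPool.Get().(*buckets)
}

// deleteBuckets zeroes orig and, when the caller owned the allocation
// (nullable), hands it back to the pool for reuse.
func deleteBuckets(orig *buckets, nullable bool) {
	if orig == nil {
		return
	}
	*orig = buckets{} // the Reset step
	if poolingEnabled && nullable {
		bucketsPool.Put(orig)
	}
}

func main() {
	b := newBuckets()
	b.Offset = 13
	deleteBuckets(b, true)
	fmt.Println(newBuckets().Offset) // 0: any recycled object was reset
}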
+ for i := len(src); i < len(dest); i++ { + DeleteExponentialHistogramDataPointBuckets(&dest[i], false) + } + } + for i := range src { + CopyExponentialHistogramDataPointBuckets(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExponentialHistogramDataPointBucketsPtrSlice(dest, src []*ExponentialHistogramDataPointBuckets) []*ExponentialHistogramDataPointBuckets { + var newDest []*ExponentialHistogramDataPointBuckets + if cap(dest) < len(src) { + newDest = make([]*ExponentialHistogramDataPointBuckets, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExponentialHistogramDataPointBuckets() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExponentialHistogramDataPointBuckets(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExponentialHistogramDataPointBuckets() + } + } + for i := range src { + CopyExponentialHistogramDataPointBuckets(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExponentialHistogramDataPointBuckets) Reset() { + *orig = ExponentialHistogramDataPointBuckets{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExponentialHistogramDataPointBuckets) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.Offset != int32(0) { + dest.WriteObjectField("offset") + dest.WriteInt32(orig.Offset) + } + if len(orig.BucketCounts) > 0 { + dest.WriteObjectField("bucketCounts") + dest.WriteArrayStart() + dest.WriteUint64(orig.BucketCounts[0]) + for i := 1; i < len(orig.BucketCounts); i++ { + dest.WriteMore() + dest.WriteUint64(orig.BucketCounts[i]) + } + dest.WriteArrayEnd() + } + + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
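// Editor's note: an illustrative sketch, not part of the vendored diff. The
// pointer-slice copy above differs from the value-slice variant: when growing
// it keeps the old pointers so their allocations are reused, and when
// shrinking it nils out the tail (deleting with nullable=true so the objects
// can go back to the pool). Its skeleton with a toy type:
package main

import "fmt"

type obj struct{ v int }

func copyObjPtrSlice(dest, src []*obj) []*obj {
	var newDest []*obj
	if cap(dest) < len(src) {
		newDest = make([]*obj, len(src))
		copy(newDest, dest) // reuse the old pointers' allocations
		for i := len(dest); i < len(src); i++ {
			newDest[i] = &obj{}
		}
	} else {
		newDest = dest[:len(src)]
		for i := len(src); i < len(dest); i++ {
			dest[i] = nil // drop the tail so the GC (or a pool) can reclaim it
		}
		for i := len(dest); i < len(src); i++ {
			newDest[i] = &obj{}
		}
	}
	for i := range src {
		newDest[i].v = src[i].v // the per-element Copy step
	}
	return newDest
}

func main() {
	dest := []*obj{{1}, {2}}
	dest = copyObjPtrSlice(dest, []*obj{{7}, {8}, {9}})
	fmt.Println(dest[0].v, dest[1].v, dest[2].v) // 7 8 9
}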
+func (orig *ExponentialHistogramDataPointBuckets) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "offset": + orig.Offset = iter.ReadInt32() + case "bucketCounts", "bucket_counts": + for iter.ReadArray() { + orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64()) + } + + default: + iter.Skip() + } + } +} + +func (orig *ExponentialHistogramDataPointBuckets) SizeProto() int { + var n int + var l int + _ = l + if orig.Offset != int32(0) { + n += 1 + proto.Soz(uint64(orig.Offset)) + } + + if len(orig.BucketCounts) > 0 { + l = 0 + for _, e := range orig.BucketCounts { + l += proto.Sov(uint64(e)) + } + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ExponentialHistogramDataPointBuckets) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.Offset != int32(0) { + pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Offset)<<1)^uint32(orig.Offset>>31))) + pos-- + buf[pos] = 0x8 + } + l = len(orig.BucketCounts) + if l > 0 { + endPos := pos + for i := l - 1; i >= 0; i-- { + pos = proto.EncodeVarint(buf, pos, uint64(orig.BucketCounts[i])) + } + pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos)) + pos-- + buf[pos] = 0x12 + } + return len(buf) - pos +} + +func (orig *ExponentialHistogramDataPointBuckets) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.Offset = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31)) + case 2: + switch wireType { + case proto.WireTypeLen: + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var num uint64 + for startPos < pos { + num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos) + if err != nil { + return err + } + orig.BucketCounts = append(orig.BucketCounts, uint64(num)) + } + if startPos != pos { + return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos) + } + case proto.WireTypeVarint: + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.BucketCounts = append(orig.BucketCounts, uint64(num)) + default: + return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExponentialHistogramDataPointBuckets() *ExponentialHistogramDataPointBuckets { + orig := NewExponentialHistogramDataPointBuckets() + orig.Offset = int32(13) + orig.BucketCounts = []uint64{uint64(0), uint64(13)} + return orig +} + +func GenTestExponentialHistogramDataPointBucketsPtrSlice() []*ExponentialHistogramDataPointBuckets { + orig := make([]*ExponentialHistogramDataPointBuckets, 5) + orig[0] = NewExponentialHistogramDataPointBuckets() + orig[1] = GenTestExponentialHistogramDataPointBuckets() + orig[2] = NewExponentialHistogramDataPointBuckets() + orig[3] = GenTestExponentialHistogramDataPointBuckets() + orig[4] = 
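// Editor's note: an illustrative sketch, not part of the vendored diff. The
// BucketCounts case above accepts both encodings protobuf permits for
// repeated varints: packed (one LEN record holding many varints back to back)
// and unpacked (one varint record per element). A standalone decoder for the
// packed payload, using the standard library's varint reader:
package main

import (
	"encoding/binary"
	"fmt"
)

func decodePacked(payload []byte) ([]uint64, error) {
	var out []uint64
	for pos := 0; pos < len(payload); {
		v, n := binary.Uvarint(payload[pos:])
		if n <= 0 {
			return nil, fmt.Errorf("truncated varint at offset %d", pos)
		}
		out = append(out, v)
		pos += n
	}
	return out, nil
}

func main() {
	// Varints for 0, 13, 300, as they would appear after the field's tag and
	// length prefix.
	fmt.Println(decodePacked([]byte{0x00, 0x0d, 0xac, 0x02})) // [0 13 300] <nil>
}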
NewExponentialHistogramDataPointBuckets() + return orig +} + +func GenTestExponentialHistogramDataPointBucketsSlice() []ExponentialHistogramDataPointBuckets { + orig := make([]ExponentialHistogramDataPointBuckets, 5) + orig[1] = *GenTestExponentialHistogramDataPointBuckets() + orig[3] = *GenTestExponentialHistogramDataPointBuckets() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go new file mode 100644 index 00000000000..bccd38e7d26 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go @@ -0,0 +1,257 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExportPartialSuccess represents the details of a partially successful export request. +type ExportLogsPartialSuccess struct { + ErrorMessage string + RejectedLogRecords int64 +} + +var ( + protoPoolExportLogsPartialSuccess = sync.Pool{ + New: func() any { + return &ExportLogsPartialSuccess{} + }, + } +) + +func NewExportLogsPartialSuccess() *ExportLogsPartialSuccess { + if !UseProtoPooling.IsEnabled() { + return &ExportLogsPartialSuccess{} + } + return protoPoolExportLogsPartialSuccess.Get().(*ExportLogsPartialSuccess) +} + +func DeleteExportLogsPartialSuccess(orig *ExportLogsPartialSuccess, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + orig.Reset() + if nullable { + protoPoolExportLogsPartialSuccess.Put(orig) + } +} + +func CopyExportLogsPartialSuccess(dest, src *ExportLogsPartialSuccess) *ExportLogsPartialSuccess { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportLogsPartialSuccess() + } + dest.RejectedLogRecords = src.RejectedLogRecords + dest.ErrorMessage = src.ErrorMessage + + return dest +} + +func CopyExportLogsPartialSuccessSlice(dest, src []ExportLogsPartialSuccess) []ExportLogsPartialSuccess { + var newDest []ExportLogsPartialSuccess + if cap(dest) < len(src) { + newDest = make([]ExportLogsPartialSuccess, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportLogsPartialSuccess(&dest[i], false) + } + } + for i := range src { + CopyExportLogsPartialSuccess(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportLogsPartialSuccessPtrSlice(dest, src []*ExportLogsPartialSuccess) []*ExportLogsPartialSuccess { + var newDest []*ExportLogsPartialSuccess + if cap(dest) < len(src) { + newDest = make([]*ExportLogsPartialSuccess, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportLogsPartialSuccess() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
+ for i := len(src); i < len(dest); i++ { + DeleteExportLogsPartialSuccess(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportLogsPartialSuccess() + } + } + for i := range src { + CopyExportLogsPartialSuccess(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportLogsPartialSuccess) Reset() { + *orig = ExportLogsPartialSuccess{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExportLogsPartialSuccess) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.RejectedLogRecords != int64(0) { + dest.WriteObjectField("rejectedLogRecords") + dest.WriteInt64(orig.RejectedLogRecords) + } + if orig.ErrorMessage != "" { + dest.WriteObjectField("errorMessage") + dest.WriteString(orig.ErrorMessage) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ExportLogsPartialSuccess) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "rejectedLogRecords", "rejected_log_records": + orig.RejectedLogRecords = iter.ReadInt64() + case "errorMessage", "error_message": + orig.ErrorMessage = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ExportLogsPartialSuccess) SizeProto() int { + var n int + var l int + _ = l + if orig.RejectedLogRecords != int64(0) { + n += 1 + proto.Sov(uint64(orig.RejectedLogRecords)) + } + + l = len(orig.ErrorMessage) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ExportLogsPartialSuccess) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.RejectedLogRecords != int64(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedLogRecords)) + pos-- + buf[pos] = 0x8 + } + l = len(orig.ErrorMessage) + if l > 0 { + pos -= l + copy(buf[pos:], orig.ErrorMessage) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + return len(buf) - pos +} + +func (orig *ExportLogsPartialSuccess) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.RejectedLogRecords = int64(num) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ErrorMessage = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportLogsPartialSuccess() *ExportLogsPartialSuccess { + orig := NewExportLogsPartialSuccess() + orig.RejectedLogRecords = int64(13) + orig.ErrorMessage = "test_errormessage" + return orig +} + +func GenTestExportLogsPartialSuccessPtrSlice() []*ExportLogsPartialSuccess { + orig := make([]*ExportLogsPartialSuccess, 5) + orig[0] = NewExportLogsPartialSuccess() + orig[1] = GenTestExportLogsPartialSuccess() + orig[2] = NewExportLogsPartialSuccess() + orig[3] = GenTestExportLogsPartialSuccess() + orig[4] = NewExportLogsPartialSuccess() + return orig +} + +func GenTestExportLogsPartialSuccessSlice() []ExportLogsPartialSuccess { + orig := make([]ExportLogsPartialSuccess, 5) + orig[1] = *GenTestExportLogsPartialSuccess() + orig[3] = *GenTestExportLogsPartialSuccess() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go new file mode 100644 index 00000000000..7fb48b36a21 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go @@ -0,0 +1,244 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Logs is the top-level struct that is propagated through the logs pipeline. +// Use NewLogs to create new instance, zero-initialized instance is not valid for use. +type ExportLogsServiceRequest struct { + ResourceLogs []*ResourceLogs +} + +var ( + protoPoolExportLogsServiceRequest = sync.Pool{ + New: func() any { + return &ExportLogsServiceRequest{} + }, + } +) + +func NewExportLogsServiceRequest() *ExportLogsServiceRequest { + if !UseProtoPooling.IsEnabled() { + return &ExportLogsServiceRequest{} + } + return protoPoolExportLogsServiceRequest.Get().(*ExportLogsServiceRequest) +} + +func DeleteExportLogsServiceRequest(orig *ExportLogsServiceRequest, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.ResourceLogs { + DeleteResourceLogs(orig.ResourceLogs[i], true) + } + orig.Reset() + if nullable { + protoPoolExportLogsServiceRequest.Put(orig) + } +} + +func CopyExportLogsServiceRequest(dest, src *ExportLogsServiceRequest) *ExportLogsServiceRequest { + // If copying to same object, just return. 
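// Editor's note: an illustrative sketch, not part of the vendored diff. Every
// UnmarshalProto loop above starts by consuming a tag: a single varint whose
// low three bits are the wire type and whose remaining bits are the field
// number. A minimal equivalent of that ConsumeTag step:
package main

import (
	"encoding/binary"
	"fmt"
)

func consumeTag(buf []byte, pos int) (fieldNum int32, wireType byte, newPos int, err error) {
	v, n := binary.Uvarint(buf[pos:])
	if n <= 0 {
		return 0, 0, 0, fmt.Errorf("bad tag at offset %d", pos)
	}
	return int32(v >> 3), byte(v & 0x7), pos + n, nil
}

func main() {
	// 0x12 is the ErrorMessage tag above: field number 2, wire type 2 (LEN).
	f, w, _, _ := consumeTag([]byte{0x12}, 0)
	fmt.Println(f, w) // 2 2
}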
+ if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportLogsServiceRequest() + } + dest.ResourceLogs = CopyResourceLogsPtrSlice(dest.ResourceLogs, src.ResourceLogs) + + return dest +} + +func CopyExportLogsServiceRequestSlice(dest, src []ExportLogsServiceRequest) []ExportLogsServiceRequest { + var newDest []ExportLogsServiceRequest + if cap(dest) < len(src) { + newDest = make([]ExportLogsServiceRequest, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportLogsServiceRequest(&dest[i], false) + } + } + for i := range src { + CopyExportLogsServiceRequest(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportLogsServiceRequestPtrSlice(dest, src []*ExportLogsServiceRequest) []*ExportLogsServiceRequest { + var newDest []*ExportLogsServiceRequest + if cap(dest) < len(src) { + newDest = make([]*ExportLogsServiceRequest, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportLogsServiceRequest() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportLogsServiceRequest(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportLogsServiceRequest() + } + } + for i := range src { + CopyExportLogsServiceRequest(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportLogsServiceRequest) Reset() { + *orig = ExportLogsServiceRequest{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExportLogsServiceRequest) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.ResourceLogs) > 0 { + dest.WriteObjectField("resourceLogs") + dest.WriteArrayStart() + orig.ResourceLogs[0].MarshalJSON(dest) + for i := 1; i < len(orig.ResourceLogs); i++ { + dest.WriteMore() + orig.ResourceLogs[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *ExportLogsServiceRequest) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resourceLogs", "resource_logs": + for iter.ReadArray() { + orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs()) + orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *ExportLogsServiceRequest) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.ResourceLogs { + l = orig.ResourceLogs[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ExportLogsServiceRequest) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.ResourceLogs) - 1; i >= 0; i-- { + l = orig.ResourceLogs[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *ExportLogsServiceRequest) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs()) + err = orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportLogsServiceRequest() *ExportLogsServiceRequest { + orig := NewExportLogsServiceRequest() + orig.ResourceLogs = []*ResourceLogs{{}, GenTestResourceLogs()} + return orig +} + +func GenTestExportLogsServiceRequestPtrSlice() []*ExportLogsServiceRequest { + orig := make([]*ExportLogsServiceRequest, 5) + orig[0] = NewExportLogsServiceRequest() + orig[1] = GenTestExportLogsServiceRequest() + orig[2] = NewExportLogsServiceRequest() + orig[3] = GenTestExportLogsServiceRequest() + orig[4] = NewExportLogsServiceRequest() + return orig +} + +func GenTestExportLogsServiceRequestSlice() []ExportLogsServiceRequest { + orig := make([]ExportLogsServiceRequest, 5) + orig[1] = *GenTestExportLogsServiceRequest() + orig[3] = *GenTestExportLogsServiceRequest() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go new file mode 100644 index 00000000000..5570fe93d33 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go @@ -0,0 +1,227 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. 
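// Editor's note: an illustrative sketch, not part of the vendored diff.
// Nested messages such as ResourceLogs above are length-delimited: the parent
// reads a LEN prefix, slices off that many bytes, and hands the sub-slice to
// the child's UnmarshalProto. Framing and unframing in miniature:
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	child := []byte{0x08, 0x0d} // some already-encoded child message
	// Frame it: tag (field 1, wire type LEN) + length varint + payload.
	frame := []byte{0x0a}
	frame = binary.AppendUvarint(frame, uint64(len(child)))
	frame = append(frame, child...)

	// Unframe it, as the "case 1" branch above does via ConsumeLen.
	length, n := binary.Uvarint(frame[1:])
	payload := frame[1+n : 1+n+int(length)]
	fmt.Printf("% x\n", payload) // 08 0d
}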
+type ExportLogsServiceResponse struct { + PartialSuccess ExportLogsPartialSuccess +} + +var ( + protoPoolExportLogsServiceResponse = sync.Pool{ + New: func() any { + return &ExportLogsServiceResponse{} + }, + } +) + +func NewExportLogsServiceResponse() *ExportLogsServiceResponse { + if !UseProtoPooling.IsEnabled() { + return &ExportLogsServiceResponse{} + } + return protoPoolExportLogsServiceResponse.Get().(*ExportLogsServiceResponse) +} + +func DeleteExportLogsServiceResponse(orig *ExportLogsServiceResponse, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteExportLogsPartialSuccess(&orig.PartialSuccess, false) + orig.Reset() + if nullable { + protoPoolExportLogsServiceResponse.Put(orig) + } +} + +func CopyExportLogsServiceResponse(dest, src *ExportLogsServiceResponse) *ExportLogsServiceResponse { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportLogsServiceResponse() + } + CopyExportLogsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess) + + return dest +} + +func CopyExportLogsServiceResponseSlice(dest, src []ExportLogsServiceResponse) []ExportLogsServiceResponse { + var newDest []ExportLogsServiceResponse + if cap(dest) < len(src) { + newDest = make([]ExportLogsServiceResponse, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportLogsServiceResponse(&dest[i], false) + } + } + for i := range src { + CopyExportLogsServiceResponse(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportLogsServiceResponsePtrSlice(dest, src []*ExportLogsServiceResponse) []*ExportLogsServiceResponse { + var newDest []*ExportLogsServiceResponse + if cap(dest) < len(src) { + newDest = make([]*ExportLogsServiceResponse, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportLogsServiceResponse() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportLogsServiceResponse(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportLogsServiceResponse() + } + } + for i := range src { + CopyExportLogsServiceResponse(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportLogsServiceResponse) Reset() { + *orig = ExportLogsServiceResponse{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExportLogsServiceResponse) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("partialSuccess") + orig.PartialSuccess.MarshalJSON(dest) + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *ExportLogsServiceResponse) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "partialSuccess", "partial_success": + + orig.PartialSuccess.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *ExportLogsServiceResponse) SizeProto() int { + var n int + var l int + _ = l + l = orig.PartialSuccess.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *ExportLogsServiceResponse) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.PartialSuccess.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + return len(buf) - pos +} + +func (orig *ExportLogsServiceResponse) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportLogsServiceResponse() *ExportLogsServiceResponse { + orig := NewExportLogsServiceResponse() + orig.PartialSuccess = *GenTestExportLogsPartialSuccess() + return orig +} + +func GenTestExportLogsServiceResponsePtrSlice() []*ExportLogsServiceResponse { + orig := make([]*ExportLogsServiceResponse, 5) + orig[0] = NewExportLogsServiceResponse() + orig[1] = GenTestExportLogsServiceResponse() + orig[2] = NewExportLogsServiceResponse() + orig[3] = GenTestExportLogsServiceResponse() + orig[4] = NewExportLogsServiceResponse() + return orig +} + +func GenTestExportLogsServiceResponseSlice() []ExportLogsServiceResponse { + orig := make([]ExportLogsServiceResponse, 5) + orig[1] = *GenTestExportLogsServiceResponse() + orig[3] = *GenTestExportLogsServiceResponse() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go new file mode 100644 index 00000000000..4915d3cb741 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go @@ -0,0 +1,257 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExportPartialSuccess represents the details of a partially successful export request. 
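// Editor's note: an illustrative sketch, not part of the vendored diff. The
// SizeProto methods above cost each length-delimited field as
// 1 (tag) + sov(len) + len bytes, where sov is the varint width of the
// length. An always-emitted empty submessage, like PartialSuccess when
// nothing was rejected, therefore still costs two bytes on the wire.
package main

import "fmt"

// sov returns how many bytes the uvarint encoding of x occupies (always >= 1).
func sov(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	for _, l := range []int{0, 13, 300} {
		fmt.Println(l, "->", 1+sov(uint64(l))+l) // 0->2, 13->15, 300->303
	}
}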
+type ExportMetricsPartialSuccess struct { + ErrorMessage string + RejectedDataPoints int64 +} + +var ( + protoPoolExportMetricsPartialSuccess = sync.Pool{ + New: func() any { + return &ExportMetricsPartialSuccess{} + }, + } +) + +func NewExportMetricsPartialSuccess() *ExportMetricsPartialSuccess { + if !UseProtoPooling.IsEnabled() { + return &ExportMetricsPartialSuccess{} + } + return protoPoolExportMetricsPartialSuccess.Get().(*ExportMetricsPartialSuccess) +} + +func DeleteExportMetricsPartialSuccess(orig *ExportMetricsPartialSuccess, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + orig.Reset() + if nullable { + protoPoolExportMetricsPartialSuccess.Put(orig) + } +} + +func CopyExportMetricsPartialSuccess(dest, src *ExportMetricsPartialSuccess) *ExportMetricsPartialSuccess { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportMetricsPartialSuccess() + } + dest.RejectedDataPoints = src.RejectedDataPoints + dest.ErrorMessage = src.ErrorMessage + + return dest +} + +func CopyExportMetricsPartialSuccessSlice(dest, src []ExportMetricsPartialSuccess) []ExportMetricsPartialSuccess { + var newDest []ExportMetricsPartialSuccess + if cap(dest) < len(src) { + newDest = make([]ExportMetricsPartialSuccess, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportMetricsPartialSuccess(&dest[i], false) + } + } + for i := range src { + CopyExportMetricsPartialSuccess(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportMetricsPartialSuccessPtrSlice(dest, src []*ExportMetricsPartialSuccess) []*ExportMetricsPartialSuccess { + var newDest []*ExportMetricsPartialSuccess + if cap(dest) < len(src) { + newDest = make([]*ExportMetricsPartialSuccess, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportMetricsPartialSuccess() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportMetricsPartialSuccess(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportMetricsPartialSuccess() + } + } + for i := range src { + CopyExportMetricsPartialSuccess(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportMetricsPartialSuccess) Reset() { + *orig = ExportMetricsPartialSuccess{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExportMetricsPartialSuccess) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.RejectedDataPoints != int64(0) { + dest.WriteObjectField("rejectedDataPoints") + dest.WriteInt64(orig.RejectedDataPoints) + } + if orig.ErrorMessage != "" { + dest.WriteObjectField("errorMessage") + dest.WriteString(orig.ErrorMessage) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *ExportMetricsPartialSuccess) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "rejectedDataPoints", "rejected_data_points": + orig.RejectedDataPoints = iter.ReadInt64() + case "errorMessage", "error_message": + orig.ErrorMessage = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ExportMetricsPartialSuccess) SizeProto() int { + var n int + var l int + _ = l + if orig.RejectedDataPoints != int64(0) { + n += 1 + proto.Sov(uint64(orig.RejectedDataPoints)) + } + + l = len(orig.ErrorMessage) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ExportMetricsPartialSuccess) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.RejectedDataPoints != int64(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedDataPoints)) + pos-- + buf[pos] = 0x8 + } + l = len(orig.ErrorMessage) + if l > 0 { + pos -= l + copy(buf[pos:], orig.ErrorMessage) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + return len(buf) - pos +} + +func (orig *ExportMetricsPartialSuccess) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.RejectedDataPoints = int64(num) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ErrorMessage = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportMetricsPartialSuccess() *ExportMetricsPartialSuccess { + orig := NewExportMetricsPartialSuccess() + orig.RejectedDataPoints = int64(13) + orig.ErrorMessage = "test_errormessage" + return orig +} + +func GenTestExportMetricsPartialSuccessPtrSlice() []*ExportMetricsPartialSuccess { + orig := make([]*ExportMetricsPartialSuccess, 5) + orig[0] = NewExportMetricsPartialSuccess() + orig[1] = GenTestExportMetricsPartialSuccess() + orig[2] = NewExportMetricsPartialSuccess() + orig[3] = GenTestExportMetricsPartialSuccess() + orig[4] = NewExportMetricsPartialSuccess() + return orig +} + +func GenTestExportMetricsPartialSuccessSlice() []ExportMetricsPartialSuccess { + orig := make([]ExportMetricsPartialSuccess, 5) + orig[1] = *GenTestExportMetricsPartialSuccess() + orig[3] = *GenTestExportMetricsPartialSuccess() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go new file mode 100644 index 00000000000..76cc8020ca9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go @@ -0,0 +1,244 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by 
"internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Metrics is the top-level struct that is propagated through the metrics pipeline. +// Use NewMetrics to create new instance, zero-initialized instance is not valid for use. +type ExportMetricsServiceRequest struct { + ResourceMetrics []*ResourceMetrics +} + +var ( + protoPoolExportMetricsServiceRequest = sync.Pool{ + New: func() any { + return &ExportMetricsServiceRequest{} + }, + } +) + +func NewExportMetricsServiceRequest() *ExportMetricsServiceRequest { + if !UseProtoPooling.IsEnabled() { + return &ExportMetricsServiceRequest{} + } + return protoPoolExportMetricsServiceRequest.Get().(*ExportMetricsServiceRequest) +} + +func DeleteExportMetricsServiceRequest(orig *ExportMetricsServiceRequest, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.ResourceMetrics { + DeleteResourceMetrics(orig.ResourceMetrics[i], true) + } + orig.Reset() + if nullable { + protoPoolExportMetricsServiceRequest.Put(orig) + } +} + +func CopyExportMetricsServiceRequest(dest, src *ExportMetricsServiceRequest) *ExportMetricsServiceRequest { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportMetricsServiceRequest() + } + dest.ResourceMetrics = CopyResourceMetricsPtrSlice(dest.ResourceMetrics, src.ResourceMetrics) + + return dest +} + +func CopyExportMetricsServiceRequestSlice(dest, src []ExportMetricsServiceRequest) []ExportMetricsServiceRequest { + var newDest []ExportMetricsServiceRequest + if cap(dest) < len(src) { + newDest = make([]ExportMetricsServiceRequest, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportMetricsServiceRequest(&dest[i], false) + } + } + for i := range src { + CopyExportMetricsServiceRequest(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportMetricsServiceRequestPtrSlice(dest, src []*ExportMetricsServiceRequest) []*ExportMetricsServiceRequest { + var newDest []*ExportMetricsServiceRequest + if cap(dest) < len(src) { + newDest = make([]*ExportMetricsServiceRequest, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportMetricsServiceRequest() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportMetricsServiceRequest(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportMetricsServiceRequest() + } + } + for i := range src { + CopyExportMetricsServiceRequest(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportMetricsServiceRequest) Reset() { + *orig = ExportMetricsServiceRequest{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ExportMetricsServiceRequest) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.ResourceMetrics) > 0 { + dest.WriteObjectField("resourceMetrics") + dest.WriteArrayStart() + orig.ResourceMetrics[0].MarshalJSON(dest) + for i := 1; i < len(orig.ResourceMetrics); i++ { + dest.WriteMore() + orig.ResourceMetrics[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ExportMetricsServiceRequest) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resourceMetrics", "resource_metrics": + for iter.ReadArray() { + orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics()) + orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *ExportMetricsServiceRequest) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.ResourceMetrics { + l = orig.ResourceMetrics[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ExportMetricsServiceRequest) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- { + l = orig.ResourceMetrics[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *ExportMetricsServiceRequest) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics()) + err = orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportMetricsServiceRequest() *ExportMetricsServiceRequest { + orig := NewExportMetricsServiceRequest() + orig.ResourceMetrics = []*ResourceMetrics{{}, GenTestResourceMetrics()} + return orig +} + +func GenTestExportMetricsServiceRequestPtrSlice() []*ExportMetricsServiceRequest { + orig := make([]*ExportMetricsServiceRequest, 5) + orig[0] = NewExportMetricsServiceRequest() + orig[1] = GenTestExportMetricsServiceRequest() + orig[2] = NewExportMetricsServiceRequest() + orig[3] = GenTestExportMetricsServiceRequest() + orig[4] = NewExportMetricsServiceRequest() + return orig +} + +func GenTestExportMetricsServiceRequestSlice() []ExportMetricsServiceRequest { + orig := make([]ExportMetricsServiceRequest, 5) + orig[1] = *GenTestExportMetricsServiceRequest() + orig[3] = *GenTestExportMetricsServiceRequest() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go new file mode 100644 index 
00000000000..ae78a10f9b8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go @@ -0,0 +1,227 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. +type ExportMetricsServiceResponse struct { + PartialSuccess ExportMetricsPartialSuccess +} + +var ( + protoPoolExportMetricsServiceResponse = sync.Pool{ + New: func() any { + return &ExportMetricsServiceResponse{} + }, + } +) + +func NewExportMetricsServiceResponse() *ExportMetricsServiceResponse { + if !UseProtoPooling.IsEnabled() { + return &ExportMetricsServiceResponse{} + } + return protoPoolExportMetricsServiceResponse.Get().(*ExportMetricsServiceResponse) +} + +func DeleteExportMetricsServiceResponse(orig *ExportMetricsServiceResponse, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteExportMetricsPartialSuccess(&orig.PartialSuccess, false) + orig.Reset() + if nullable { + protoPoolExportMetricsServiceResponse.Put(orig) + } +} + +func CopyExportMetricsServiceResponse(dest, src *ExportMetricsServiceResponse) *ExportMetricsServiceResponse { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportMetricsServiceResponse() + } + CopyExportMetricsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess) + + return dest +} + +func CopyExportMetricsServiceResponseSlice(dest, src []ExportMetricsServiceResponse) []ExportMetricsServiceResponse { + var newDest []ExportMetricsServiceResponse + if cap(dest) < len(src) { + newDest = make([]ExportMetricsServiceResponse, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportMetricsServiceResponse(&dest[i], false) + } + } + for i := range src { + CopyExportMetricsServiceResponse(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportMetricsServiceResponsePtrSlice(dest, src []*ExportMetricsServiceResponse) []*ExportMetricsServiceResponse { + var newDest []*ExportMetricsServiceResponse + if cap(dest) < len(src) { + newDest = make([]*ExportMetricsServiceResponse, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportMetricsServiceResponse() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportMetricsServiceResponse(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). 
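The `Copy*PtrSlice` helpers above all follow one capacity-reuse scheme: when `cap(dest)` is too small they allocate a new slice but carry the old pointers over, and otherwise they reslice `dest`, nil out the shrunken tail so the GC can reclaim it, and allocate fresh elements only for the grown range. A self-contained sketch of that scheme with a hypothetical `Item` type (the real helpers additionally route deletions through the pooled `Delete*` functions):

```go
package main

import "fmt"

// Item is a hypothetical stand-in for a generated pdata struct.
type Item struct{ V int }

// CopyItemPtrSlice mirrors the generated pointer-slice copy helpers: reuse
// dest's backing array and existing pointers when capacity allows, allocate
// (or nil out) only where the lengths diverge.
func CopyItemPtrSlice(dest, src []*Item) []*Item {
	var out []*Item
	if cap(dest) < len(src) {
		out = make([]*Item, len(src))
		copy(out, dest) // re-use the pointers dest already owns
		for i := len(dest); i < len(src); i++ {
			out[i] = &Item{}
		}
	} else {
		out = dest[:len(src)]
		// Shrinking: drop tail references so the GC can free them.
		// This can happen when len(src) < len(dest) <= cap(dest).
		for i := len(src); i < len(dest); i++ {
			dest[i] = nil
		}
		// Growing within capacity: slots beyond len(dest) hold stale or
		// nil pointers, so allocate fresh elements for them.
		for i := len(dest); i < len(src); i++ {
			out[i] = &Item{}
		}
	}
	for i := range src {
		*out[i] = *src[i]
	}
	return out
}

func main() {
	src := []*Item{{1}, {2}, {3}}
	dest := make([]*Item, 1, 4)
	dest[0] = &Item{9}
	dest = CopyItemPtrSlice(dest, src)
	fmt.Println(dest[0].V, dest[1].V, dest[2].V) // 1 2 3
}
```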
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportMetricsServiceResponse() + } + } + for i := range src { + CopyExportMetricsServiceResponse(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportMetricsServiceResponse) Reset() { + *orig = ExportMetricsServiceResponse{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExportMetricsServiceResponse) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("partialSuccess") + orig.PartialSuccess.MarshalJSON(dest) + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ExportMetricsServiceResponse) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "partialSuccess", "partial_success": + + orig.PartialSuccess.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *ExportMetricsServiceResponse) SizeProto() int { + var n int + var l int + _ = l + l = orig.PartialSuccess.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *ExportMetricsServiceResponse) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.PartialSuccess.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + return len(buf) - pos +} + +func (orig *ExportMetricsServiceResponse) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportMetricsServiceResponse() *ExportMetricsServiceResponse { + orig := NewExportMetricsServiceResponse() + orig.PartialSuccess = *GenTestExportMetricsPartialSuccess() + return orig +} + +func GenTestExportMetricsServiceResponsePtrSlice() []*ExportMetricsServiceResponse { + orig := make([]*ExportMetricsServiceResponse, 5) + orig[0] = NewExportMetricsServiceResponse() + orig[1] = GenTestExportMetricsServiceResponse() + orig[2] = NewExportMetricsServiceResponse() + orig[3] = GenTestExportMetricsServiceResponse() + orig[4] = NewExportMetricsServiceResponse() + return orig +} + +func GenTestExportMetricsServiceResponseSlice() []ExportMetricsServiceResponse { + orig := make([]ExportMetricsServiceResponse, 5) + orig[1] = *GenTestExportMetricsServiceResponse() + orig[3] = *GenTestExportMetricsServiceResponse() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go new file mode 100644 index 00000000000..5a75e7a657e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go @@ -0,0 +1,257 @@ +// Copyright 
The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExportPartialSuccess represents the details of a partially successful export request. +type ExportProfilesPartialSuccess struct { + ErrorMessage string + RejectedProfiles int64 +} + +var ( + protoPoolExportProfilesPartialSuccess = sync.Pool{ + New: func() any { + return &ExportProfilesPartialSuccess{} + }, + } +) + +func NewExportProfilesPartialSuccess() *ExportProfilesPartialSuccess { + if !UseProtoPooling.IsEnabled() { + return &ExportProfilesPartialSuccess{} + } + return protoPoolExportProfilesPartialSuccess.Get().(*ExportProfilesPartialSuccess) +} + +func DeleteExportProfilesPartialSuccess(orig *ExportProfilesPartialSuccess, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + orig.Reset() + if nullable { + protoPoolExportProfilesPartialSuccess.Put(orig) + } +} + +func CopyExportProfilesPartialSuccess(dest, src *ExportProfilesPartialSuccess) *ExportProfilesPartialSuccess { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportProfilesPartialSuccess() + } + dest.RejectedProfiles = src.RejectedProfiles + dest.ErrorMessage = src.ErrorMessage + + return dest +} + +func CopyExportProfilesPartialSuccessSlice(dest, src []ExportProfilesPartialSuccess) []ExportProfilesPartialSuccess { + var newDest []ExportProfilesPartialSuccess + if cap(dest) < len(src) { + newDest = make([]ExportProfilesPartialSuccess, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportProfilesPartialSuccess(&dest[i], false) + } + } + for i := range src { + CopyExportProfilesPartialSuccess(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportProfilesPartialSuccessPtrSlice(dest, src []*ExportProfilesPartialSuccess) []*ExportProfilesPartialSuccess { + var newDest []*ExportProfilesPartialSuccess + if cap(dest) < len(src) { + newDest = make([]*ExportProfilesPartialSuccess, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportProfilesPartialSuccess() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportProfilesPartialSuccess(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportProfilesPartialSuccess() + } + } + for i := range src { + CopyExportProfilesPartialSuccess(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportProfilesPartialSuccess) Reset() { + *orig = ExportProfilesPartialSuccess{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ExportProfilesPartialSuccess) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.RejectedProfiles != int64(0) { + dest.WriteObjectField("rejectedProfiles") + dest.WriteInt64(orig.RejectedProfiles) + } + if orig.ErrorMessage != "" { + dest.WriteObjectField("errorMessage") + dest.WriteString(orig.ErrorMessage) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ExportProfilesPartialSuccess) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "rejectedProfiles", "rejected_profiles": + orig.RejectedProfiles = iter.ReadInt64() + case "errorMessage", "error_message": + orig.ErrorMessage = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ExportProfilesPartialSuccess) SizeProto() int { + var n int + var l int + _ = l + if orig.RejectedProfiles != int64(0) { + n += 1 + proto.Sov(uint64(orig.RejectedProfiles)) + } + + l = len(orig.ErrorMessage) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ExportProfilesPartialSuccess) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.RejectedProfiles != int64(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedProfiles)) + pos-- + buf[pos] = 0x8 + } + l = len(orig.ErrorMessage) + if l > 0 { + pos -= l + copy(buf[pos:], orig.ErrorMessage) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + return len(buf) - pos +} + +func (orig *ExportProfilesPartialSuccess) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.RejectedProfiles = int64(num) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ErrorMessage = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportProfilesPartialSuccess() *ExportProfilesPartialSuccess { + orig := NewExportProfilesPartialSuccess() + orig.RejectedProfiles = int64(13) + orig.ErrorMessage = "test_errormessage" + return orig +} + +func GenTestExportProfilesPartialSuccessPtrSlice() []*ExportProfilesPartialSuccess { + orig := make([]*ExportProfilesPartialSuccess, 5) + orig[0] = NewExportProfilesPartialSuccess() + orig[1] = GenTestExportProfilesPartialSuccess() + orig[2] = NewExportProfilesPartialSuccess() + orig[3] = GenTestExportProfilesPartialSuccess() + orig[4] = NewExportProfilesPartialSuccess() + return orig +} + +func GenTestExportProfilesPartialSuccessSlice() []ExportProfilesPartialSuccess { + orig := make([]ExportProfilesPartialSuccess, 5) + orig[1] = *GenTestExportProfilesPartialSuccess() + orig[3] = *GenTestExportProfilesPartialSuccess() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go new file mode 100644 index 00000000000..ba9ecbc4eed --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go @@ -0,0 +1,278 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Profiles is the top-level struct that is propagated through the profiles pipeline. +// Use NewProfiles to create new instance, zero-initialized instance is not valid for use. 
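The `MarshalProto` bodies above all encode back to front: callers size the buffer once via `SizeProto`, and each field writes its payload first, then its length varint, then a hard-coded tag byte (`0x8` is field 1 with wire type varint, i.e. `1<<3|0`; `0x12` is field 2 length-delimited, `2<<3|2`; `0xa` is field 1 length-delimited). Writing backwards means a nested message never needs its length before its bytes exist. A sketch of the technique with a hand-rolled backward varint writer (the real `proto.EncodeVarint` is internal; only its call shape is visible in the diff):

```go
package main

import "fmt"

// putUvarintBackward writes v's varint encoding so that it *ends* at pos,
// returning the new start position. This mirrors the back-to-front style
// of the generated MarshalProto methods (sketch only).
func putUvarintBackward(buf []byte, pos int, v uint64) int {
	// Encode forward into a scratch array, then copy into place.
	var tmp [10]byte
	n := 0
	for v >= 0x80 {
		tmp[n] = byte(v) | 0x80
		v >>= 7
		n++
	}
	tmp[n] = byte(v)
	n++
	pos -= n
	copy(buf[pos:], tmp[:n])
	return pos
}

func main() {
	// Encode field 2 (wire type 2 = LEN) holding "hi", back to front:
	// payload first, then its length varint, then the tag byte.
	buf := make([]byte, 16)
	pos := len(buf)

	msg := "hi"
	pos -= len(msg)
	copy(buf[pos:], msg)
	pos = putUvarintBackward(buf, pos, uint64(len(msg)))
	pos--
	buf[pos] = 0x12 // tag = fieldNum<<3 | wireType = 2<<3 | 2

	fmt.Printf("% x\n", buf[pos:]) // 12 02 68 69
}
```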
+type ExportProfilesServiceRequest struct { + ResourceProfiles []*ResourceProfiles + Dictionary ProfilesDictionary +} + +var ( + protoPoolExportProfilesServiceRequest = sync.Pool{ + New: func() any { + return &ExportProfilesServiceRequest{} + }, + } +) + +func NewExportProfilesServiceRequest() *ExportProfilesServiceRequest { + if !UseProtoPooling.IsEnabled() { + return &ExportProfilesServiceRequest{} + } + return protoPoolExportProfilesServiceRequest.Get().(*ExportProfilesServiceRequest) +} + +func DeleteExportProfilesServiceRequest(orig *ExportProfilesServiceRequest, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.ResourceProfiles { + DeleteResourceProfiles(orig.ResourceProfiles[i], true) + } + DeleteProfilesDictionary(&orig.Dictionary, false) + orig.Reset() + if nullable { + protoPoolExportProfilesServiceRequest.Put(orig) + } +} + +func CopyExportProfilesServiceRequest(dest, src *ExportProfilesServiceRequest) *ExportProfilesServiceRequest { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportProfilesServiceRequest() + } + dest.ResourceProfiles = CopyResourceProfilesPtrSlice(dest.ResourceProfiles, src.ResourceProfiles) + + CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary) + + return dest +} + +func CopyExportProfilesServiceRequestSlice(dest, src []ExportProfilesServiceRequest) []ExportProfilesServiceRequest { + var newDest []ExportProfilesServiceRequest + if cap(dest) < len(src) { + newDest = make([]ExportProfilesServiceRequest, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportProfilesServiceRequest(&dest[i], false) + } + } + for i := range src { + CopyExportProfilesServiceRequest(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportProfilesServiceRequestPtrSlice(dest, src []*ExportProfilesServiceRequest) []*ExportProfilesServiceRequest { + var newDest []*ExportProfilesServiceRequest + if cap(dest) < len(src) { + newDest = make([]*ExportProfilesServiceRequest, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportProfilesServiceRequest() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportProfilesServiceRequest(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportProfilesServiceRequest() + } + } + for i := range src { + CopyExportProfilesServiceRequest(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportProfilesServiceRequest) Reset() { + *orig = ExportProfilesServiceRequest{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ExportProfilesServiceRequest) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.ResourceProfiles) > 0 { + dest.WriteObjectField("resourceProfiles") + dest.WriteArrayStart() + orig.ResourceProfiles[0].MarshalJSON(dest) + for i := 1; i < len(orig.ResourceProfiles); i++ { + dest.WriteMore() + orig.ResourceProfiles[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectField("dictionary") + orig.Dictionary.MarshalJSON(dest) + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ExportProfilesServiceRequest) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resourceProfiles", "resource_profiles": + for iter.ReadArray() { + orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles()) + orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalJSON(iter) + } + + case "dictionary": + + orig.Dictionary.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *ExportProfilesServiceRequest) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.ResourceProfiles { + l = orig.ResourceProfiles[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + l = orig.Dictionary.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *ExportProfilesServiceRequest) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- { + l = orig.ResourceProfiles[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + l = orig.Dictionary.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + + return len(buf) - pos +} + +func (orig *ExportProfilesServiceRequest) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles()) + err = orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Dictionary.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportProfilesServiceRequest() *ExportProfilesServiceRequest { + orig := NewExportProfilesServiceRequest() + orig.ResourceProfiles = []*ResourceProfiles{{}, GenTestResourceProfiles()} + orig.Dictionary = *GenTestProfilesDictionary() + return orig +} + +func GenTestExportProfilesServiceRequestPtrSlice() []*ExportProfilesServiceRequest { + orig := make([]*ExportProfilesServiceRequest, 5) + orig[0] = NewExportProfilesServiceRequest() + orig[1] = GenTestExportProfilesServiceRequest() + orig[2] = NewExportProfilesServiceRequest() + orig[3] = GenTestExportProfilesServiceRequest() + orig[4] = NewExportProfilesServiceRequest() + return orig +} + +func GenTestExportProfilesServiceRequestSlice() []ExportProfilesServiceRequest { + orig := make([]ExportProfilesServiceRequest, 5) + orig[1] = *GenTestExportProfilesServiceRequest() + orig[3] = *GenTestExportProfilesServiceRequest() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go new file mode 100644 index 00000000000..e8fea55ea3a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go @@ -0,0 +1,227 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. 
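The decode side is the mirror image: every `UnmarshalProto` above loops `ConsumeTag` → dispatch on field number → `ConsumeVarint` or `ConsumeLen`, falling back to `ConsumeUnknown` for fields it does not know. A minimal stdlib-only sketch of that dispatch for the two-field partial-success shape (field 1 varint, field 2 string); unlike the generated code, this sketch rejects unknown fields instead of skipping them:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodePartialSuccess parses {1: varint, 2: string}, the message shape
// used by the generated partial-success types. Sketch only; the generated
// code does the same dispatch via its internal proto helpers.
func decodePartialSuccess(buf []byte) (rejected int64, msg string, err error) {
	pos := 0
	for pos < len(buf) {
		tag, n := binary.Uvarint(buf[pos:])
		if n <= 0 {
			return 0, "", fmt.Errorf("bad tag at %d", pos)
		}
		pos += n
		fieldNum, wireType := tag>>3, tag&7
		switch {
		case fieldNum == 1 && wireType == 0: // varint
			v, n := binary.Uvarint(buf[pos:])
			if n <= 0 {
				return 0, "", fmt.Errorf("bad varint at %d", pos)
			}
			rejected, pos = int64(v), pos+n
		case fieldNum == 2 && wireType == 2: // LEN
			l, n := binary.Uvarint(buf[pos:])
			if n <= 0 || pos+n+int(l) > len(buf) {
				return 0, "", fmt.Errorf("bad length at %d", pos)
			}
			pos += n
			msg, pos = string(buf[pos:pos+int(l)]), pos+int(l)
		default:
			return 0, "", fmt.Errorf("unexpected field %d", fieldNum)
		}
	}
	return rejected, msg, nil
}

func main() {
	// 08 0d       -> field 1, varint 13
	// 12 02 68 69 -> field 2, "hi"
	buf := []byte{0x08, 0x0d, 0x12, 0x02, 0x68, 0x69}
	r, m, err := decodePartialSuccess(buf)
	fmt.Println(r, m, err) // 13 hi <nil>
}
```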
+type ExportProfilesServiceResponse struct { + PartialSuccess ExportProfilesPartialSuccess +} + +var ( + protoPoolExportProfilesServiceResponse = sync.Pool{ + New: func() any { + return &ExportProfilesServiceResponse{} + }, + } +) + +func NewExportProfilesServiceResponse() *ExportProfilesServiceResponse { + if !UseProtoPooling.IsEnabled() { + return &ExportProfilesServiceResponse{} + } + return protoPoolExportProfilesServiceResponse.Get().(*ExportProfilesServiceResponse) +} + +func DeleteExportProfilesServiceResponse(orig *ExportProfilesServiceResponse, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteExportProfilesPartialSuccess(&orig.PartialSuccess, false) + orig.Reset() + if nullable { + protoPoolExportProfilesServiceResponse.Put(orig) + } +} + +func CopyExportProfilesServiceResponse(dest, src *ExportProfilesServiceResponse) *ExportProfilesServiceResponse { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportProfilesServiceResponse() + } + CopyExportProfilesPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess) + + return dest +} + +func CopyExportProfilesServiceResponseSlice(dest, src []ExportProfilesServiceResponse) []ExportProfilesServiceResponse { + var newDest []ExportProfilesServiceResponse + if cap(dest) < len(src) { + newDest = make([]ExportProfilesServiceResponse, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportProfilesServiceResponse(&dest[i], false) + } + } + for i := range src { + CopyExportProfilesServiceResponse(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportProfilesServiceResponsePtrSlice(dest, src []*ExportProfilesServiceResponse) []*ExportProfilesServiceResponse { + var newDest []*ExportProfilesServiceResponse + if cap(dest) < len(src) { + newDest = make([]*ExportProfilesServiceResponse, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportProfilesServiceResponse() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportProfilesServiceResponse(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportProfilesServiceResponse() + } + } + for i := range src { + CopyExportProfilesServiceResponse(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportProfilesServiceResponse) Reset() { + *orig = ExportProfilesServiceResponse{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExportProfilesServiceResponse) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("partialSuccess") + orig.PartialSuccess.MarshalJSON(dest) + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *ExportProfilesServiceResponse) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "partialSuccess", "partial_success": + + orig.PartialSuccess.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *ExportProfilesServiceResponse) SizeProto() int { + var n int + var l int + _ = l + l = orig.PartialSuccess.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *ExportProfilesServiceResponse) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.PartialSuccess.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + return len(buf) - pos +} + +func (orig *ExportProfilesServiceResponse) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportProfilesServiceResponse() *ExportProfilesServiceResponse { + orig := NewExportProfilesServiceResponse() + orig.PartialSuccess = *GenTestExportProfilesPartialSuccess() + return orig +} + +func GenTestExportProfilesServiceResponsePtrSlice() []*ExportProfilesServiceResponse { + orig := make([]*ExportProfilesServiceResponse, 5) + orig[0] = NewExportProfilesServiceResponse() + orig[1] = GenTestExportProfilesServiceResponse() + orig[2] = NewExportProfilesServiceResponse() + orig[3] = GenTestExportProfilesServiceResponse() + orig[4] = NewExportProfilesServiceResponse() + return orig +} + +func GenTestExportProfilesServiceResponseSlice() []ExportProfilesServiceResponse { + orig := make([]ExportProfilesServiceResponse, 5) + orig[1] = *GenTestExportProfilesServiceResponse() + orig[3] = *GenTestExportProfilesServiceResponse() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go new file mode 100644 index 00000000000..6df4657fa29 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go @@ -0,0 +1,257 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExportPartialSuccess represents the details of a partially successful export request. 
+type ExportTracePartialSuccess struct { + ErrorMessage string + RejectedSpans int64 +} + +var ( + protoPoolExportTracePartialSuccess = sync.Pool{ + New: func() any { + return &ExportTracePartialSuccess{} + }, + } +) + +func NewExportTracePartialSuccess() *ExportTracePartialSuccess { + if !UseProtoPooling.IsEnabled() { + return &ExportTracePartialSuccess{} + } + return protoPoolExportTracePartialSuccess.Get().(*ExportTracePartialSuccess) +} + +func DeleteExportTracePartialSuccess(orig *ExportTracePartialSuccess, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + orig.Reset() + if nullable { + protoPoolExportTracePartialSuccess.Put(orig) + } +} + +func CopyExportTracePartialSuccess(dest, src *ExportTracePartialSuccess) *ExportTracePartialSuccess { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportTracePartialSuccess() + } + dest.RejectedSpans = src.RejectedSpans + dest.ErrorMessage = src.ErrorMessage + + return dest +} + +func CopyExportTracePartialSuccessSlice(dest, src []ExportTracePartialSuccess) []ExportTracePartialSuccess { + var newDest []ExportTracePartialSuccess + if cap(dest) < len(src) { + newDest = make([]ExportTracePartialSuccess, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportTracePartialSuccess(&dest[i], false) + } + } + for i := range src { + CopyExportTracePartialSuccess(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportTracePartialSuccessPtrSlice(dest, src []*ExportTracePartialSuccess) []*ExportTracePartialSuccess { + var newDest []*ExportTracePartialSuccess + if cap(dest) < len(src) { + newDest = make([]*ExportTracePartialSuccess, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportTracePartialSuccess() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportTracePartialSuccess(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportTracePartialSuccess() + } + } + for i := range src { + CopyExportTracePartialSuccess(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportTracePartialSuccess) Reset() { + *orig = ExportTracePartialSuccess{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExportTracePartialSuccess) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.RejectedSpans != int64(0) { + dest.WriteObjectField("rejectedSpans") + dest.WriteInt64(orig.RejectedSpans) + } + if orig.ErrorMessage != "" { + dest.WriteObjectField("errorMessage") + dest.WriteString(orig.ErrorMessage) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *ExportTracePartialSuccess) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "rejectedSpans", "rejected_spans": + orig.RejectedSpans = iter.ReadInt64() + case "errorMessage", "error_message": + orig.ErrorMessage = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ExportTracePartialSuccess) SizeProto() int { + var n int + var l int + _ = l + if orig.RejectedSpans != int64(0) { + n += 1 + proto.Sov(uint64(orig.RejectedSpans)) + } + + l = len(orig.ErrorMessage) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ExportTracePartialSuccess) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.RejectedSpans != int64(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedSpans)) + pos-- + buf[pos] = 0x8 + } + l = len(orig.ErrorMessage) + if l > 0 { + pos -= l + copy(buf[pos:], orig.ErrorMessage) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + return len(buf) - pos +} + +func (orig *ExportTracePartialSuccess) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.RejectedSpans = int64(num) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ErrorMessage = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportTracePartialSuccess() *ExportTracePartialSuccess { + orig := NewExportTracePartialSuccess() + orig.RejectedSpans = int64(13) + orig.ErrorMessage = "test_errormessage" + return orig +} + +func GenTestExportTracePartialSuccessPtrSlice() []*ExportTracePartialSuccess { + orig := make([]*ExportTracePartialSuccess, 5) + orig[0] = NewExportTracePartialSuccess() + orig[1] = GenTestExportTracePartialSuccess() + orig[2] = NewExportTracePartialSuccess() + orig[3] = GenTestExportTracePartialSuccess() + orig[4] = NewExportTracePartialSuccess() + return orig +} + +func GenTestExportTracePartialSuccessSlice() []ExportTracePartialSuccess { + orig := make([]ExportTracePartialSuccess, 5) + orig[1] = *GenTestExportTracePartialSuccess() + orig[3] = *GenTestExportTracePartialSuccess() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go new file mode 100644 index 00000000000..2a4ff9ef34f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go @@ -0,0 +1,244 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
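On the JSON side, the generated marshalers emit canonical lowerCamelCase keys and omit zero values, while the unmarshalers accept both camelCase and snake_case spellings (the paired `"rejectedSpans", "rejected_spans"` cases above), matching the OTLP JSON mapping. A rough stdlib approximation of the output behavior, assuming a hypothetical `partialSuccess` struct; note that struct tags cannot express the dual-spelling input leniency, which is exactly what the hand-written switch provides:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// partialSuccess mirrors the JSON shape the generated marshalers emit:
// lowerCamelCase keys, zero values omitted. Sketch only; the generated
// code uses a hand-rolled streaming writer, not encoding/json.
type partialSuccess struct {
	RejectedSpans int64  `json:"rejectedSpans,omitempty"`
	ErrorMessage  string `json:"errorMessage,omitempty"`
}

func main() {
	out, _ := json.Marshal(partialSuccess{RejectedSpans: 13, ErrorMessage: "queue full"})
	fmt.Println(string(out)) // {"rejectedSpans":13,"errorMessage":"queue full"}

	// Input: stdlib tags match only one spelling; the generated readers'
	// switch on both key forms is what accepts snake_case as well.
	var p partialSuccess
	_ = json.Unmarshal([]byte(`{"rejectedSpans":7}`), &p)
	fmt.Println(p.RejectedSpans) // 7
}
```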
+ +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Traces is the top-level struct that is propagated through the traces pipeline. +// Use NewTraces to create new instance, zero-initialized instance is not valid for use. +type ExportTraceServiceRequest struct { + ResourceSpans []*ResourceSpans +} + +var ( + protoPoolExportTraceServiceRequest = sync.Pool{ + New: func() any { + return &ExportTraceServiceRequest{} + }, + } +) + +func NewExportTraceServiceRequest() *ExportTraceServiceRequest { + if !UseProtoPooling.IsEnabled() { + return &ExportTraceServiceRequest{} + } + return protoPoolExportTraceServiceRequest.Get().(*ExportTraceServiceRequest) +} + +func DeleteExportTraceServiceRequest(orig *ExportTraceServiceRequest, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.ResourceSpans { + DeleteResourceSpans(orig.ResourceSpans[i], true) + } + orig.Reset() + if nullable { + protoPoolExportTraceServiceRequest.Put(orig) + } +} + +func CopyExportTraceServiceRequest(dest, src *ExportTraceServiceRequest) *ExportTraceServiceRequest { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportTraceServiceRequest() + } + dest.ResourceSpans = CopyResourceSpansPtrSlice(dest.ResourceSpans, src.ResourceSpans) + + return dest +} + +func CopyExportTraceServiceRequestSlice(dest, src []ExportTraceServiceRequest) []ExportTraceServiceRequest { + var newDest []ExportTraceServiceRequest + if cap(dest) < len(src) { + newDest = make([]ExportTraceServiceRequest, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportTraceServiceRequest(&dest[i], false) + } + } + for i := range src { + CopyExportTraceServiceRequest(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportTraceServiceRequestPtrSlice(dest, src []*ExportTraceServiceRequest) []*ExportTraceServiceRequest { + var newDest []*ExportTraceServiceRequest + if cap(dest) < len(src) { + newDest = make([]*ExportTraceServiceRequest, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportTraceServiceRequest() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportTraceServiceRequest(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportTraceServiceRequest() + } + } + for i := range src { + CopyExportTraceServiceRequest(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportTraceServiceRequest) Reset() { + *orig = ExportTraceServiceRequest{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ExportTraceServiceRequest) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.ResourceSpans) > 0 { + dest.WriteObjectField("resourceSpans") + dest.WriteArrayStart() + orig.ResourceSpans[0].MarshalJSON(dest) + for i := 1; i < len(orig.ResourceSpans); i++ { + dest.WriteMore() + orig.ResourceSpans[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ExportTraceServiceRequest) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resourceSpans", "resource_spans": + for iter.ReadArray() { + orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans()) + orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *ExportTraceServiceRequest) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.ResourceSpans { + l = orig.ResourceSpans[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ExportTraceServiceRequest) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.ResourceSpans) - 1; i >= 0; i-- { + l = orig.ResourceSpans[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *ExportTraceServiceRequest) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans()) + err = orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportTraceServiceRequest() *ExportTraceServiceRequest { + orig := NewExportTraceServiceRequest() + orig.ResourceSpans = []*ResourceSpans{{}, GenTestResourceSpans()} + return orig +} + +func GenTestExportTraceServiceRequestPtrSlice() []*ExportTraceServiceRequest { + orig := make([]*ExportTraceServiceRequest, 5) + orig[0] = NewExportTraceServiceRequest() + orig[1] = GenTestExportTraceServiceRequest() + orig[2] = NewExportTraceServiceRequest() + orig[3] = GenTestExportTraceServiceRequest() + orig[4] = NewExportTraceServiceRequest() + return orig +} + +func GenTestExportTraceServiceRequestSlice() []ExportTraceServiceRequest { + orig := make([]ExportTraceServiceRequest, 5) + orig[1] = *GenTestExportTraceServiceRequest() + orig[3] = *GenTestExportTraceServiceRequest() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go new file mode 100644 index 00000000000..f3d35d4b9be --- /dev/null +++ 
b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go @@ -0,0 +1,227 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. +type ExportTraceServiceResponse struct { + PartialSuccess ExportTracePartialSuccess +} + +var ( + protoPoolExportTraceServiceResponse = sync.Pool{ + New: func() any { + return &ExportTraceServiceResponse{} + }, + } +) + +func NewExportTraceServiceResponse() *ExportTraceServiceResponse { + if !UseProtoPooling.IsEnabled() { + return &ExportTraceServiceResponse{} + } + return protoPoolExportTraceServiceResponse.Get().(*ExportTraceServiceResponse) +} + +func DeleteExportTraceServiceResponse(orig *ExportTraceServiceResponse, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteExportTracePartialSuccess(&orig.PartialSuccess, false) + orig.Reset() + if nullable { + protoPoolExportTraceServiceResponse.Put(orig) + } +} + +func CopyExportTraceServiceResponse(dest, src *ExportTraceServiceResponse) *ExportTraceServiceResponse { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewExportTraceServiceResponse() + } + CopyExportTracePartialSuccess(&dest.PartialSuccess, &src.PartialSuccess) + + return dest +} + +func CopyExportTraceServiceResponseSlice(dest, src []ExportTraceServiceResponse) []ExportTraceServiceResponse { + var newDest []ExportTraceServiceResponse + if cap(dest) < len(src) { + newDest = make([]ExportTraceServiceResponse, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportTraceServiceResponse(&dest[i], false) + } + } + for i := range src { + CopyExportTraceServiceResponse(&newDest[i], &src[i]) + } + return newDest +} + +func CopyExportTraceServiceResponsePtrSlice(dest, src []*ExportTraceServiceResponse) []*ExportTraceServiceResponse { + var newDest []*ExportTraceServiceResponse + if cap(dest) < len(src) { + newDest = make([]*ExportTraceServiceResponse, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportTraceServiceResponse() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteExportTraceServiceResponse(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). 
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewExportTraceServiceResponse() + } + } + for i := range src { + CopyExportTraceServiceResponse(newDest[i], src[i]) + } + return newDest +} + +func (orig *ExportTraceServiceResponse) Reset() { + *orig = ExportTraceServiceResponse{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ExportTraceServiceResponse) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("partialSuccess") + orig.PartialSuccess.MarshalJSON(dest) + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ExportTraceServiceResponse) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "partialSuccess", "partial_success": + + orig.PartialSuccess.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *ExportTraceServiceResponse) SizeProto() int { + var n int + var l int + _ = l + l = orig.PartialSuccess.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *ExportTraceServiceResponse) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.PartialSuccess.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + return len(buf) - pos +} + +func (orig *ExportTraceServiceResponse) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestExportTraceServiceResponse() *ExportTraceServiceResponse { + orig := NewExportTraceServiceResponse() + orig.PartialSuccess = *GenTestExportTracePartialSuccess() + return orig +} + +func GenTestExportTraceServiceResponsePtrSlice() []*ExportTraceServiceResponse { + orig := make([]*ExportTraceServiceResponse, 5) + orig[0] = NewExportTraceServiceResponse() + orig[1] = GenTestExportTraceServiceResponse() + orig[2] = NewExportTraceServiceResponse() + orig[3] = GenTestExportTraceServiceResponse() + orig[4] = NewExportTraceServiceResponse() + return orig +} + +func GenTestExportTraceServiceResponseSlice() []ExportTraceServiceResponse { + orig := make([]ExportTraceServiceResponse, 5) + orig[1] = *GenTestExportTraceServiceResponse() + orig[3] = *GenTestExportTraceServiceResponse() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_function.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go similarity index 57% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_function.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go index 2999e35c2d2..06bc218b170 100644 --- 
a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_function.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go @@ -10,27 +10,34 @@ import ( "fmt" "sync" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source. +type Function struct { + NameStrindex int32 + SystemNameStrindex int32 + FilenameStrindex int32 + StartLine int64 +} + var ( protoPoolFunction = sync.Pool{ New: func() any { - return &otlpprofiles.Function{} + return &Function{} }, } ) -func NewOrigFunction() *otlpprofiles.Function { +func NewFunction() *Function { if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.Function{} + return &Function{} } - return protoPoolFunction.Get().(*otlpprofiles.Function) + return protoPoolFunction.Get().(*Function) } -func DeleteOrigFunction(orig *otlpprofiles.Function, nullable bool) { +func DeleteFunction(orig *Function, nullable bool) { if orig == nil { return } @@ -46,28 +53,81 @@ func DeleteOrigFunction(orig *otlpprofiles.Function, nullable bool) { } } -func CopyOrigFunction(dest, src *otlpprofiles.Function) { +func CopyFunction(dest, src *Function) *Function { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewFunction() } dest.NameStrindex = src.NameStrindex dest.SystemNameStrindex = src.SystemNameStrindex dest.FilenameStrindex = src.FilenameStrindex dest.StartLine = src.StartLine + + return dest } -func GenTestOrigFunction() *otlpprofiles.Function { - orig := NewOrigFunction() - orig.NameStrindex = int32(13) - orig.SystemNameStrindex = int32(13) - orig.FilenameStrindex = int32(13) - orig.StartLine = int64(13) - return orig +func CopyFunctionSlice(dest, src []Function) []Function { + var newDest []Function + if cap(dest) < len(src) { + newDest = make([]Function, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteFunction(&dest[i], false) + } + } + for i := range src { + CopyFunction(&newDest[i], &src[i]) + } + return newDest +} + +func CopyFunctionPtrSlice(dest, src []*Function) []*Function { + var newDest []*Function + if cap(dest) < len(src) { + newDest = make([]*Function, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewFunction() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteFunction(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewFunction() + } + } + for i := range src { + CopyFunction(newDest[i], src[i]) + } + return newDest +} + +func (orig *Function) Reset() { + *orig = Function{} } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
-func MarshalJSONOrigFunction(orig *otlpprofiles.Function, dest *json.Stream) { +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Function) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if orig.NameStrindex != int32(0) { dest.WriteObjectField("nameStrindex") @@ -88,8 +148,8 @@ func MarshalJSONOrigFunction(orig *otlpprofiles.Function, dest *json.Stream) { dest.WriteObjectEnd() } -// UnmarshalJSONOrigFunction unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigFunction(orig *otlpprofiles.Function, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Function) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "nameStrindex", "name_strindex": @@ -106,45 +166,45 @@ func UnmarshalJSONOrigFunction(orig *otlpprofiles.Function, iter *json.Iterator) } } -func SizeProtoOrigFunction(orig *otlpprofiles.Function) int { +func (orig *Function) SizeProto() int { var n int var l int _ = l - if orig.NameStrindex != 0 { + if orig.NameStrindex != int32(0) { n += 1 + proto.Sov(uint64(orig.NameStrindex)) } - if orig.SystemNameStrindex != 0 { + if orig.SystemNameStrindex != int32(0) { n += 1 + proto.Sov(uint64(orig.SystemNameStrindex)) } - if orig.FilenameStrindex != 0 { + if orig.FilenameStrindex != int32(0) { n += 1 + proto.Sov(uint64(orig.FilenameStrindex)) } - if orig.StartLine != 0 { + if orig.StartLine != int64(0) { n += 1 + proto.Sov(uint64(orig.StartLine)) } return n } -func MarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) int { +func (orig *Function) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - if orig.NameStrindex != 0 { + if orig.NameStrindex != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.NameStrindex)) pos-- buf[pos] = 0x8 } - if orig.SystemNameStrindex != 0 { + if orig.SystemNameStrindex != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.SystemNameStrindex)) pos-- buf[pos] = 0x10 } - if orig.FilenameStrindex != 0 { + if orig.FilenameStrindex != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.FilenameStrindex)) pos-- buf[pos] = 0x18 } - if orig.StartLine != 0 { + if orig.StartLine != int64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.StartLine)) pos-- buf[pos] = 0x20 @@ -152,7 +212,7 @@ func MarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) int { return len(buf) - pos } -func UnmarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) error { +func (orig *Function) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -176,7 +236,6 @@ func UnmarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) error { if err != nil { return err } - orig.NameStrindex = int32(num) case 2: @@ -188,7 +247,6 @@ func UnmarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) error { if err != nil { return err } - orig.SystemNameStrindex = int32(num) case 3: @@ -200,7 +258,6 @@ func UnmarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) error { if err != nil { return err } - orig.FilenameStrindex = int32(num) case 4: @@ -212,7 +269,6 @@ func UnmarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) error { if err != nil { return err } - orig.StartLine = int64(num) default: pos, err = proto.ConsumeUnknown(buf, pos, wireType) @@ -223,3 +279,29 @@ func UnmarshalProtoOrigFunction(orig 
*otlpprofiles.Function, buf []byte) error { } return nil } + +func GenTestFunction() *Function { + orig := NewFunction() + orig.NameStrindex = int32(13) + orig.SystemNameStrindex = int32(13) + orig.FilenameStrindex = int32(13) + orig.StartLine = int64(13) + return orig +} + +func GenTestFunctionPtrSlice() []*Function { + orig := make([]*Function, 5) + orig[0] = NewFunction() + orig[1] = GenTestFunction() + orig[2] = NewFunction() + orig[3] = GenTestFunction() + orig[4] = NewFunction() + return orig +} + +func GenTestFunctionSlice() []Function { + orig := make([]Function, 5) + orig[1] = *GenTestFunction() + orig[3] = *GenTestFunction() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go new file mode 100644 index 00000000000..996d6624253 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Gauge represents the type of a numeric metric that always exports the "current value" for every data point. +type Gauge struct { + DataPoints []*NumberDataPoint +} + +var ( + protoPoolGauge = sync.Pool{ + New: func() any { + return &Gauge{} + }, + } +) + +func NewGauge() *Gauge { + if !UseProtoPooling.IsEnabled() { + return &Gauge{} + } + return protoPoolGauge.Get().(*Gauge) +} + +func DeleteGauge(orig *Gauge, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.DataPoints { + DeleteNumberDataPoint(orig.DataPoints[i], true) + } + orig.Reset() + if nullable { + protoPoolGauge.Put(orig) + } +} + +func CopyGauge(dest, src *Gauge) *Gauge { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewGauge() + } + dest.DataPoints = CopyNumberDataPointPtrSlice(dest.DataPoints, src.DataPoints) + + return dest +} + +func CopyGaugeSlice(dest, src []Gauge) []Gauge { + var newDest []Gauge + if cap(dest) < len(src) { + newDest = make([]Gauge, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteGauge(&dest[i], false) + } + } + for i := range src { + CopyGauge(&newDest[i], &src[i]) + } + return newDest +} + +func CopyGaugePtrSlice(dest, src []*Gauge) []*Gauge { + var newDest []*Gauge + if cap(dest) < len(src) { + newDest = make([]*Gauge, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewGauge() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteGauge(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). 
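	// Editor's note (annotation, not part of the generated diff): this loop
	// covers len(dest) < len(src) <= cap(dest). The slots between len(dest)
	// and len(src) sit in a region an earlier shrink cleared, returning the
	// pointers to protoPoolGauge and nil-ing them, so they are repopulated
	// with NewGauge() rather than reused.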
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewGauge() + } + } + for i := range src { + CopyGauge(newDest[i], src[i]) + } + return newDest +} + +func (orig *Gauge) Reset() { + *orig = Gauge{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Gauge) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.DataPoints) > 0 { + dest.WriteObjectField("dataPoints") + dest.WriteArrayStart() + orig.DataPoints[0].MarshalJSON(dest) + for i := 1; i < len(orig.DataPoints); i++ { + dest.WriteMore() + orig.DataPoints[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Gauge) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "dataPoints", "data_points": + for iter.ReadArray() { + orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint()) + orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *Gauge) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.DataPoints { + l = orig.DataPoints[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *Gauge) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.DataPoints) - 1; i >= 0; i-- { + l = orig.DataPoints[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *Gauge) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint()) + err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestGauge() *Gauge { + orig := NewGauge() + orig.DataPoints = []*NumberDataPoint{{}, GenTestNumberDataPoint()} + return orig +} + +func GenTestGaugePtrSlice() []*Gauge { + orig := make([]*Gauge, 5) + orig[0] = NewGauge() + orig[1] = GenTestGauge() + orig[2] = NewGauge() + orig[3] = GenTestGauge() + orig[4] = NewGauge() + return orig +} + +func GenTestGaugeSlice() []Gauge { + orig := make([]Gauge, 5) + orig[1] = *GenTestGauge() + orig[3] = *GenTestGauge() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go new file mode 100644 index 00000000000..09fab65bb80 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go @@ -0,0 +1,274 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. 
+// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval. +type Histogram struct { + DataPoints []*HistogramDataPoint + AggregationTemporality AggregationTemporality +} + +var ( + protoPoolHistogram = sync.Pool{ + New: func() any { + return &Histogram{} + }, + } +) + +func NewHistogram() *Histogram { + if !UseProtoPooling.IsEnabled() { + return &Histogram{} + } + return protoPoolHistogram.Get().(*Histogram) +} + +func DeleteHistogram(orig *Histogram, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.DataPoints { + DeleteHistogramDataPoint(orig.DataPoints[i], true) + } + + orig.Reset() + if nullable { + protoPoolHistogram.Put(orig) + } +} + +func CopyHistogram(dest, src *Histogram) *Histogram { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewHistogram() + } + dest.DataPoints = CopyHistogramDataPointPtrSlice(dest.DataPoints, src.DataPoints) + + dest.AggregationTemporality = src.AggregationTemporality + + return dest +} + +func CopyHistogramSlice(dest, src []Histogram) []Histogram { + var newDest []Histogram + if cap(dest) < len(src) { + newDest = make([]Histogram, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteHistogram(&dest[i], false) + } + } + for i := range src { + CopyHistogram(&newDest[i], &src[i]) + } + return newDest +} + +func CopyHistogramPtrSlice(dest, src []*Histogram) []*Histogram { + var newDest []*Histogram + if cap(dest) < len(src) { + newDest = make([]*Histogram, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewHistogram() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteHistogram(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewHistogram() + } + } + for i := range src { + CopyHistogram(newDest[i], src[i]) + } + return newDest +} + +func (orig *Histogram) Reset() { + *orig = Histogram{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
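// Editor's note (annotation, not part of the generated diff): fields are
// emitted only when set; the dataPoints array is skipped when empty and
// aggregationTemporality, written as its integer value, is skipped when zero,
// so defaults are omitted from the JSON output as in the other generated types.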
+func (orig *Histogram) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.DataPoints) > 0 { + dest.WriteObjectField("dataPoints") + dest.WriteArrayStart() + orig.DataPoints[0].MarshalJSON(dest) + for i := 1; i < len(orig.DataPoints); i++ { + dest.WriteMore() + orig.DataPoints[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + + if int32(orig.AggregationTemporality) != 0 { + dest.WriteObjectField("aggregationTemporality") + dest.WriteInt32(int32(orig.AggregationTemporality)) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Histogram) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "dataPoints", "data_points": + for iter.ReadArray() { + orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint()) + orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter) + } + + case "aggregationTemporality", "aggregation_temporality": + orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value)) + default: + iter.Skip() + } + } +} + +func (orig *Histogram) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.DataPoints { + l = orig.DataPoints[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.AggregationTemporality != AggregationTemporality(0) { + n += 1 + proto.Sov(uint64(orig.AggregationTemporality)) + } + return n +} + +func (orig *Histogram) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.DataPoints) - 1; i >= 0; i-- { + l = orig.DataPoints[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + if orig.AggregationTemporality != AggregationTemporality(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality)) + pos-- + buf[pos] = 0x10 + } + return len(buf) - pos +} + +func (orig *Histogram) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
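	// Editor's note (annotation, not part of the generated diff): ConsumeTag
	// decodes the varint key (fieldNum<<3 | wireType). The switch below checks
	// that each known field arrives with its expected wire type and routes
	// anything unknown to ConsumeUnknown, which skips it so newer payloads
	// still parse against this schema.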
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint()) + err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.AggregationTemporality = AggregationTemporality(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestHistogram() *Histogram { + orig := NewHistogram() + orig.DataPoints = []*HistogramDataPoint{{}, GenTestHistogramDataPoint()} + orig.AggregationTemporality = AggregationTemporality(13) + return orig +} + +func GenTestHistogramPtrSlice() []*Histogram { + orig := make([]*Histogram, 5) + orig[0] = NewHistogram() + orig[1] = GenTestHistogram() + orig[2] = NewHistogram() + orig[3] = GenTestHistogram() + orig[4] = NewHistogram() + return orig +} + +func GenTestHistogramSlice() []Histogram { + orig := make([]Histogram, 5) + orig[1] = *GenTestHistogram() + orig[3] = *GenTestHistogram() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go similarity index 57% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapoint.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go index baffb2e190d..b43e3cbb8fb 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go @@ -12,45 +12,42 @@ import ( "math" "sync" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values. 
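// Editor's note (annotation, not part of the generated diff): the metadata bit
// array below tracks which of the optional scalar fields (Sum, Min, Max) are
// present, replacing the oneof wrapper types (HistogramDataPoint_Sum and
// friends) that the removed code used; see the SetSum/HasSum/RemoveSum
// accessors generated further down.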
+type HistogramDataPoint struct { + Attributes []KeyValue + BucketCounts []uint64 + ExplicitBounds []float64 + Exemplars []Exemplar + StartTimeUnixNano uint64 + TimeUnixNano uint64 + Count uint64 + Sum float64 + Min float64 + Max float64 + metadata [1]uint64 + Flags uint32 +} + var ( protoPoolHistogramDataPoint = sync.Pool{ New: func() any { - return &otlpmetrics.HistogramDataPoint{} - }, - } - ProtoPoolHistogramDataPoint_Sum = sync.Pool{ - New: func() any { - return &otlpmetrics.HistogramDataPoint_Sum{} - }, - } - - ProtoPoolHistogramDataPoint_Min = sync.Pool{ - New: func() any { - return &otlpmetrics.HistogramDataPoint_Min{} - }, - } - - ProtoPoolHistogramDataPoint_Max = sync.Pool{ - New: func() any { - return &otlpmetrics.HistogramDataPoint_Max{} + return &HistogramDataPoint{} }, } ) -func NewOrigHistogramDataPoint() *otlpmetrics.HistogramDataPoint { +func NewHistogramDataPoint() *HistogramDataPoint { if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.HistogramDataPoint{} + return &HistogramDataPoint{} } - return protoPoolHistogramDataPoint.Get().(*otlpmetrics.HistogramDataPoint) + return protoPoolHistogramDataPoint.Get().(*HistogramDataPoint) } -func DeleteOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable bool) { +func DeleteHistogramDataPoint(orig *HistogramDataPoint, nullable bool) { if orig == nil { return } @@ -59,36 +56,12 @@ func DeleteOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable orig.Reset() return } - for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) + DeleteKeyValue(&orig.Attributes[i], false) } - switch ov := orig.Sum_.(type) { - case *otlpmetrics.HistogramDataPoint_Sum: - if UseProtoPooling.IsEnabled() { - ov.Sum = float64(0) - ProtoPoolHistogramDataPoint_Sum.Put(ov) - } - } for i := range orig.Exemplars { - DeleteOrigExemplar(&orig.Exemplars[i], false) - } - switch ov := orig.Min_.(type) { - case *otlpmetrics.HistogramDataPoint_Min: - if UseProtoPooling.IsEnabled() { - ov.Min = float64(0) - ProtoPoolHistogramDataPoint_Min.Put(ov) - } - - } - switch ov := orig.Max_.(type) { - case *otlpmetrics.HistogramDataPoint_Max: - if UseProtoPooling.IsEnabled() { - ov.Max = float64(0) - ProtoPoolHistogramDataPoint_Max.Put(ov) - } - + DeleteExemplar(&orig.Exemplars[i], false) } orig.Reset() @@ -97,77 +70,114 @@ func DeleteOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable } } -func CopyOrigHistogramDataPoint(dest, src *otlpmetrics.HistogramDataPoint) { +func CopyHistogramDataPoint(dest, src *HistogramDataPoint) *HistogramDataPoint { // If copying to same object, just return. 
if src == dest { - return + return dest } - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) + + if src == nil { + return nil + } + + if dest == nil { + dest = NewHistogramDataPoint() + } + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + dest.StartTimeUnixNano = src.StartTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano dest.Count = src.Count - if srcSum, ok := src.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok { - destSum, ok := dest.Sum_.(*otlpmetrics.HistogramDataPoint_Sum) - if !ok { - destSum = &otlpmetrics.HistogramDataPoint_Sum{} - dest.Sum_ = destSum - } - destSum.Sum = srcSum.Sum + if src.HasSum() { + dest.SetSum(src.Sum) } else { - dest.Sum_ = nil + dest.RemoveSum() } - dest.BucketCounts = CopyOrigUint64Slice(dest.BucketCounts, src.BucketCounts) - dest.ExplicitBounds = CopyOrigFloat64Slice(dest.ExplicitBounds, src.ExplicitBounds) - dest.Exemplars = CopyOrigExemplarSlice(dest.Exemplars, src.Exemplars) + + dest.BucketCounts = append(dest.BucketCounts[:0], src.BucketCounts...) + + dest.ExplicitBounds = append(dest.ExplicitBounds[:0], src.ExplicitBounds...) + + dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars) + dest.Flags = src.Flags - if srcMin, ok := src.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok { - destMin, ok := dest.Min_.(*otlpmetrics.HistogramDataPoint_Min) - if !ok { - destMin = &otlpmetrics.HistogramDataPoint_Min{} - dest.Min_ = destMin - } - destMin.Min = srcMin.Min + if src.HasMin() { + dest.SetMin(src.Min) + } else { + dest.RemoveMin() + } + + if src.HasMax() { + dest.SetMax(src.Max) } else { - dest.Min_ = nil + dest.RemoveMax() } - if srcMax, ok := src.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok { - destMax, ok := dest.Max_.(*otlpmetrics.HistogramDataPoint_Max) - if !ok { - destMax = &otlpmetrics.HistogramDataPoint_Max{} - dest.Max_ = destMax + + return dest +} + +func CopyHistogramDataPointSlice(dest, src []HistogramDataPoint) []HistogramDataPoint { + var newDest []HistogramDataPoint + if cap(dest) < len(src) { + newDest = make([]HistogramDataPoint, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteHistogramDataPoint(&dest[i], false) + } + } + for i := range src { + CopyHistogramDataPoint(&newDest[i], &src[i]) + } + return newDest +} + +func CopyHistogramDataPointPtrSlice(dest, src []*HistogramDataPoint) []*HistogramDataPoint { + var newDest []*HistogramDataPoint + if cap(dest) < len(src) { + newDest = make([]*HistogramDataPoint, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewHistogramDataPoint() } - destMax.Max = srcMax.Max } else { - dest.Max_ = nil + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteHistogramDataPoint(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). 
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewHistogramDataPoint() + } } + for i := range src { + CopyHistogramDataPoint(newDest[i], src[i]) + } + return newDest } -func GenTestOrigHistogramDataPoint() *otlpmetrics.HistogramDataPoint { - orig := NewOrigHistogramDataPoint() - orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.StartTimeUnixNano = 1234567890 - orig.TimeUnixNano = 1234567890 - orig.Count = uint64(13) - orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: float64(3.1415926)} - orig.BucketCounts = GenerateOrigTestUint64Slice() - orig.ExplicitBounds = GenerateOrigTestFloat64Slice() - orig.Exemplars = GenerateOrigTestExemplarSlice() - orig.Flags = 1 - orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: float64(3.1415926)} - orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: float64(3.1415926)} - return orig +func (orig *HistogramDataPoint) Reset() { + *orig = HistogramDataPoint{} } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, dest *json.Stream) { +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *HistogramDataPoint) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if len(orig.Attributes) > 0 { dest.WriteObjectField("attributes") dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Attributes[0], dest) + orig.Attributes[0].MarshalJSON(dest) for i := 1; i < len(orig.Attributes); i++ { dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Attributes[i], dest) + orig.Attributes[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -183,7 +193,7 @@ func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, des dest.WriteObjectField("count") dest.WriteUint64(orig.Count) } - if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok { + if orig.HasSum() { dest.WriteObjectField("sum") dest.WriteFloat64(orig.Sum) } @@ -197,6 +207,7 @@ func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, des } dest.WriteArrayEnd() } + if len(orig.ExplicitBounds) > 0 { dest.WriteObjectField("explicitBounds") dest.WriteArrayStart() @@ -207,13 +218,14 @@ func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, des } dest.WriteArrayEnd() } + if len(orig.Exemplars) > 0 { dest.WriteObjectField("exemplars") dest.WriteArrayStart() - MarshalJSONOrigExemplar(&orig.Exemplars[0], dest) + orig.Exemplars[0].MarshalJSON(dest) for i := 1; i < len(orig.Exemplars); i++ { dest.WriteMore() - MarshalJSONOrigExemplar(&orig.Exemplars[i], dest) + orig.Exemplars[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -221,25 +233,25 @@ func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, des dest.WriteObjectField("flags") dest.WriteUint32(orig.Flags) } - if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok { + if orig.HasMin() { dest.WriteObjectField("min") dest.WriteFloat64(orig.Min) } - if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok { + if orig.HasMax() { dest.WriteObjectField("max") dest.WriteFloat64(orig.Max) } dest.WriteObjectEnd() } -// UnmarshalJSONOrigHistogramDataPoint unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
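// Editor's note (annotation, not part of the generated diff): the "sum", "min"
// and "max" cases below go through SetSum/SetMin/SetMax rather than assigning
// the fields directly, so the presence bits in metadata stay consistent while
// decoding JSON.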
+func (orig *HistogramDataPoint) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "attributes": for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) } case "startTimeUnixNano", "start_time_unix_nano": @@ -249,16 +261,7 @@ func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, i case "count": orig.Count = iter.ReadUint64() case "sum": - { - var ov *otlpmetrics.HistogramDataPoint_Sum - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.HistogramDataPoint_Sum{} - } else { - ov = ProtoPoolHistogramDataPoint_Sum.Get().(*otlpmetrics.HistogramDataPoint_Sum) - } - ov.Sum = iter.ReadFloat64() - orig.Sum_ = ov - } + orig.SetSum(iter.ReadFloat64()) case "bucketCounts", "bucket_counts": for iter.ReadArray() { @@ -272,35 +275,17 @@ func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, i case "exemplars": for iter.ReadArray() { - orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{}) - UnmarshalJSONOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter) + orig.Exemplars = append(orig.Exemplars, Exemplar{}) + orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter) } case "flags": orig.Flags = iter.ReadUint32() case "min": - { - var ov *otlpmetrics.HistogramDataPoint_Min - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.HistogramDataPoint_Min{} - } else { - ov = ProtoPoolHistogramDataPoint_Min.Get().(*otlpmetrics.HistogramDataPoint_Min) - } - ov.Min = iter.ReadFloat64() - orig.Min_ = ov - } + orig.SetMin(iter.ReadFloat64()) case "max": - { - var ov *otlpmetrics.HistogramDataPoint_Max - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.HistogramDataPoint_Max{} - } else { - ov = ProtoPoolHistogramDataPoint_Max.Get().(*otlpmetrics.HistogramDataPoint_Max) - } - ov.Max = iter.ReadFloat64() - orig.Max_ = ov - } + orig.SetMax(iter.ReadFloat64()) default: iter.Skip() @@ -308,25 +293,24 @@ func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, i } } -func SizeProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint) int { +func (orig *HistogramDataPoint) SizeProto() int { var n int var l int _ = l for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) + l = orig.Attributes[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { n += 9 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { n += 9 } - if orig.Count != 0 { + if orig.Count != uint64(0) { n += 9 } - if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok { - _ = orig + if orig.HasSum() { n += 9 } l = len(orig.BucketCounts) @@ -340,53 +324,51 @@ func SizeProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint) int { n += 1 + proto.Sov(uint64(l)) + l } for i := range orig.Exemplars { - l = SizeProtoOrigExemplar(&orig.Exemplars[i]) + l = orig.Exemplars[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { n += 1 + proto.Sov(uint64(orig.Flags)) } - if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok { - _ = orig + if orig.HasMin() { n += 9 } - if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok { - _ = orig + if orig.HasMax() { n += 9 } return n } 
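Editor's note, not part of the vendored diff: the generated MarshalProto methods encode back to front. SizeProto computes the exact wire size first, the caller allocates a buffer of that size, and fields are then written from the end of the buffer toward the start, so each length-delimited field's varint length can be placed directly in front of a payload whose size is already known, with no second pass and no temporary buffers. Below is a minimal, self-contained sketch of that pattern; encodeVarint here is a stand-in written for illustration that only mirrors how proto.EncodeVarint is called above, not the real package's code.

// Editor's sketch (illustrative, not generated code).
package main

import "fmt"

// encodeVarint writes v as a protobuf varint ending just before pos and
// returns the new, smaller pos. The signature is assumed from the call
// sites in the generated MarshalProto methods above.
func encodeVarint(buf []byte, pos int, v uint64) int {
	tmp := make([]byte, 0, 10)
	for {
		b := byte(v & 0x7F)
		v >>= 7
		if v != 0 {
			b |= 0x80 // continuation bit: more groups follow
		}
		tmp = append(tmp, b)
		if v == 0 {
			break
		}
	}
	pos -= len(tmp)
	copy(buf[pos:], tmp) // tmp is already in wire order (LSB group first)
	return pos
}

func main() {
	payload := []byte("abc")
	buf := make([]byte, 16)
	pos := len(buf)

	// Write the payload first, then its length, then the tag byte 0x0a
	// (field 1, wire type 2), walking right to left exactly as the
	// generated MarshalProto methods do.
	pos -= len(payload)
	copy(buf[pos:], payload)
	pos = encodeVarint(buf, pos, uint64(len(payload)))
	pos--
	buf[pos] = 0x0a

	fmt.Printf("%% x\n", buf[pos:]) // 0a 03 61 62 63
}

Running the sketch prints "0a 03 61 62 63": tag 0x0a (field 1, wire type 2), a one-byte length, then the payload, which is the same byte sequence a conventional front-to-back encoder would produce.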
-func MarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, buf []byte) int { +func (orig *HistogramDataPoint) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) + l = orig.Attributes[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x4a } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano)) pos-- buf[pos] = 0x11 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) pos-- buf[pos] = 0x19 } - if orig.Count != 0 { + if orig.Count != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count)) pos-- buf[pos] = 0x21 } - if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok { + if orig.HasSum() { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum)) pos-- @@ -413,24 +395,24 @@ func MarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, bu buf[pos] = 0x3a } for i := len(orig.Exemplars) - 1; i >= 0; i-- { - l = MarshalProtoOrigExemplar(&orig.Exemplars[i], buf[:pos]) + l = orig.Exemplars[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x42 } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags)) pos-- buf[pos] = 0x50 } - if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok { + if orig.HasMin() { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min)) pos-- buf[pos] = 0x59 } - if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok { + if orig.HasMax() { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max)) pos-- @@ -439,7 +421,7 @@ func MarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, bu return len(buf) - pos } -func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, buf []byte) error { +func (orig *HistogramDataPoint) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -464,8 +446,8 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, return err } startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -515,14 +497,7 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, if err != nil { return err } - var ov *otlpmetrics.HistogramDataPoint_Sum - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.HistogramDataPoint_Sum{} - } else { - ov = ProtoPoolHistogramDataPoint_Sum.Get().(*otlpmetrics.HistogramDataPoint_Sum) - } - ov.Sum = math.Float64frombits(num) - orig.Sum_ = ov + orig.SetSum(math.Float64frombits(num)) case 6: switch wireType { case proto.WireTypeLen: @@ -598,8 +573,8 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, return err } startPos := pos - length - orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{}) - err = 
UnmarshalProtoOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos]) + orig.Exemplars = append(orig.Exemplars, Exemplar{}) + err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -613,7 +588,6 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, if err != nil { return err } - orig.Flags = uint32(num) case 11: @@ -625,14 +599,7 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, if err != nil { return err } - var ov *otlpmetrics.HistogramDataPoint_Min - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.HistogramDataPoint_Min{} - } else { - ov = ProtoPoolHistogramDataPoint_Min.Get().(*otlpmetrics.HistogramDataPoint_Min) - } - ov.Min = math.Float64frombits(num) - orig.Min_ = ov + orig.SetMin(math.Float64frombits(num)) case 12: if wireType != proto.WireTypeI64 { @@ -643,14 +610,7 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, if err != nil { return err } - var ov *otlpmetrics.HistogramDataPoint_Max - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.HistogramDataPoint_Max{} - } else { - ov = ProtoPoolHistogramDataPoint_Max.Get().(*otlpmetrics.HistogramDataPoint_Max) - } - ov.Max = math.Float64frombits(num) - orig.Max_ = ov + orig.SetMax(math.Float64frombits(num)) default: pos, err = proto.ConsumeUnknown(buf, pos, wireType) if err != nil { @@ -660,3 +620,87 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, } return nil } + +const fieldBlockHistogramDataPointSum = uint64(0 >> 6) +const fieldBitHistogramDataPointSum = uint64(1 << 0 & 0x3F) + +func (m *HistogramDataPoint) SetSum(value float64) { + m.Sum = value + m.metadata[fieldBlockHistogramDataPointSum] |= fieldBitHistogramDataPointSum +} + +func (m *HistogramDataPoint) RemoveSum() { + m.Sum = float64(0) + m.metadata[fieldBlockHistogramDataPointSum] &^= fieldBitHistogramDataPointSum +} + +func (m *HistogramDataPoint) HasSum() bool { + return m.metadata[fieldBlockHistogramDataPointSum]&fieldBitHistogramDataPointSum != 0 +} + +const fieldBlockHistogramDataPointMin = uint64(1 >> 6) +const fieldBitHistogramDataPointMin = uint64(1 << 1 & 0x3F) + +func (m *HistogramDataPoint) SetMin(value float64) { + m.Min = value + m.metadata[fieldBlockHistogramDataPointMin] |= fieldBitHistogramDataPointMin +} + +func (m *HistogramDataPoint) RemoveMin() { + m.Min = float64(0) + m.metadata[fieldBlockHistogramDataPointMin] &^= fieldBitHistogramDataPointMin +} + +func (m *HistogramDataPoint) HasMin() bool { + return m.metadata[fieldBlockHistogramDataPointMin]&fieldBitHistogramDataPointMin != 0 +} + +const fieldBlockHistogramDataPointMax = uint64(2 >> 6) +const fieldBitHistogramDataPointMax = uint64(1 << 2 & 0x3F) + +func (m *HistogramDataPoint) SetMax(value float64) { + m.Max = value + m.metadata[fieldBlockHistogramDataPointMax] |= fieldBitHistogramDataPointMax +} + +func (m *HistogramDataPoint) RemoveMax() { + m.Max = float64(0) + m.metadata[fieldBlockHistogramDataPointMax] &^= fieldBitHistogramDataPointMax +} + +func (m *HistogramDataPoint) HasMax() bool { + return m.metadata[fieldBlockHistogramDataPointMax]&fieldBitHistogramDataPointMax != 0 +} + +func GenTestHistogramDataPoint() *HistogramDataPoint { + orig := NewHistogramDataPoint() + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.StartTimeUnixNano = uint64(13) + orig.TimeUnixNano = uint64(13) + orig.Count = uint64(13) + orig.SetSum(float64(3.1415926)) + orig.BucketCounts = []uint64{uint64(0), 
uint64(13)} + orig.ExplicitBounds = []float64{float64(0), float64(3.1415926)} + orig.Exemplars = []Exemplar{{}, *GenTestExemplar()} + orig.Flags = uint32(13) + orig.SetMin(float64(3.1415926)) + orig.SetMax(float64(3.1415926)) + return orig +} + +func GenTestHistogramDataPointPtrSlice() []*HistogramDataPoint { + orig := make([]*HistogramDataPoint, 5) + orig[0] = NewHistogramDataPoint() + orig[1] = GenTestHistogramDataPoint() + orig[2] = NewHistogramDataPoint() + orig[3] = GenTestHistogramDataPoint() + orig[4] = NewHistogramDataPoint() + return orig +} + +func GenTestHistogramDataPointSlice() []HistogramDataPoint { + orig := make([]HistogramDataPoint, 5) + orig[1] = *GenTestHistogramDataPoint() + orig[3] = *GenTestHistogramDataPoint() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go new file mode 100644 index 00000000000..ab44a2eabb9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go @@ -0,0 +1,342 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// InstrumentationScope is a message representing the instrumentation scope information. +type InstrumentationScope struct { + Name string + Version string + Attributes []KeyValue + DroppedAttributesCount uint32 +} + +var ( + protoPoolInstrumentationScope = sync.Pool{ + New: func() any { + return &InstrumentationScope{} + }, + } +) + +func NewInstrumentationScope() *InstrumentationScope { + if !UseProtoPooling.IsEnabled() { + return &InstrumentationScope{} + } + return protoPoolInstrumentationScope.Get().(*InstrumentationScope) +} + +func DeleteInstrumentationScope(orig *InstrumentationScope, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + for i := range orig.Attributes { + DeleteKeyValue(&orig.Attributes[i], false) + } + + orig.Reset() + if nullable { + protoPoolInstrumentationScope.Put(orig) + } +} + +func CopyInstrumentationScope(dest, src *InstrumentationScope) *InstrumentationScope { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewInstrumentationScope() + } + dest.Name = src.Name + dest.Version = src.Version + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + + dest.DroppedAttributesCount = src.DroppedAttributesCount + + return dest +} + +func CopyInstrumentationScopeSlice(dest, src []InstrumentationScope) []InstrumentationScope { + var newDest []InstrumentationScope + if cap(dest) < len(src) { + newDest = make([]InstrumentationScope, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
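	// Editor's note (annotation, not part of the generated diff): the call
	// below passes nullable=false because these are in-place slice elements
	// rather than pooled pointers; it zeroes each struct (and, with pooling
	// enabled, first releases the pooled contents of its attributes) so the
	// backing array keeps no live references.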
+ for i := len(src); i < len(dest); i++ { + DeleteInstrumentationScope(&dest[i], false) + } + } + for i := range src { + CopyInstrumentationScope(&newDest[i], &src[i]) + } + return newDest +} + +func CopyInstrumentationScopePtrSlice(dest, src []*InstrumentationScope) []*InstrumentationScope { + var newDest []*InstrumentationScope + if cap(dest) < len(src) { + newDest = make([]*InstrumentationScope, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewInstrumentationScope() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteInstrumentationScope(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewInstrumentationScope() + } + } + for i := range src { + CopyInstrumentationScope(newDest[i], src[i]) + } + return newDest +} + +func (orig *InstrumentationScope) Reset() { + *orig = InstrumentationScope{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *InstrumentationScope) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.Name != "" { + dest.WriteObjectField("name") + dest.WriteString(orig.Name) + } + if orig.Version != "" { + dest.WriteObjectField("version") + dest.WriteString(orig.Version) + } + if len(orig.Attributes) > 0 { + dest.WriteObjectField("attributes") + dest.WriteArrayStart() + orig.Attributes[0].MarshalJSON(dest) + for i := 1; i < len(orig.Attributes); i++ { + dest.WriteMore() + orig.Attributes[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.DroppedAttributesCount != uint32(0) { + dest.WriteObjectField("droppedAttributesCount") + dest.WriteUint32(orig.DroppedAttributesCount) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *InstrumentationScope) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "name": + orig.Name = iter.ReadString() + case "version": + orig.Version = iter.ReadString() + case "attributes": + for iter.ReadArray() { + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) + } + + case "droppedAttributesCount", "dropped_attributes_count": + orig.DroppedAttributesCount = iter.ReadUint32() + default: + iter.Skip() + } + } +} + +func (orig *InstrumentationScope) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.Name) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.Version) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + for i := range orig.Attributes { + l = orig.Attributes[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.DroppedAttributesCount != uint32(0) { + n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount)) + } + return n +} + +func (orig *InstrumentationScope) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.Name) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Name) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + l = len(orig.Version) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Version) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + for i := len(orig.Attributes) - 1; i >= 0; i-- { + l = orig.Attributes[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + if orig.DroppedAttributesCount != uint32(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount)) + pos-- + buf[pos] = 0x20 + } + return len(buf) - pos +} + +func (orig *InstrumentationScope) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
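	// Editor's note (annotation, not part of the generated diff): the Name and
	// Version cases below build their values with string(buf[startPos:pos]),
	// which copies the bytes, so the decoded struct never aliases the input
	// buffer.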
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Name = string(buf[startPos:pos]) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Version = string(buf[startPos:pos]) + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 4: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.DroppedAttributesCount = uint32(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestInstrumentationScope() *InstrumentationScope { + orig := NewInstrumentationScope() + orig.Name = "test_name" + orig.Version = "test_version" + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.DroppedAttributesCount = uint32(13) + return orig +} + +func GenTestInstrumentationScopePtrSlice() []*InstrumentationScope { + orig := make([]*InstrumentationScope, 5) + orig[0] = NewInstrumentationScope() + orig[1] = GenTestInstrumentationScope() + orig[2] = NewInstrumentationScope() + orig[3] = GenTestInstrumentationScope() + orig[4] = NewInstrumentationScope() + return orig +} + +func GenTestInstrumentationScopeSlice() []InstrumentationScope { + orig := make([]InstrumentationScope, 5) + orig[1] = *GenTestInstrumentationScope() + orig[3] = *GenTestInstrumentationScope() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go new file mode 100644 index 00000000000..d77ec13a99a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go @@ -0,0 +1,266 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +type IPAddr struct { + Zone string + IP []byte +} + +var ( + protoPoolIPAddr = sync.Pool{ + New: func() any { + return &IPAddr{} + }, + } +) + +func NewIPAddr() *IPAddr { + if !UseProtoPooling.IsEnabled() { + return &IPAddr{} + } + return protoPoolIPAddr.Get().(*IPAddr) +} + +func DeleteIPAddr(orig *IPAddr, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + orig.Reset() + if nullable { + protoPoolIPAddr.Put(orig) + } +} + +func CopyIPAddr(dest, src *IPAddr) *IPAddr { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewIPAddr() + } + dest.IP = src.IP + dest.Zone = src.Zone + + return dest +} + +func CopyIPAddrSlice(dest, src []IPAddr) []IPAddr { + var newDest []IPAddr + if cap(dest) < len(src) { + newDest = make([]IPAddr, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteIPAddr(&dest[i], false) + } + } + for i := range src { + CopyIPAddr(&newDest[i], &src[i]) + } + return newDest +} + +func CopyIPAddrPtrSlice(dest, src []*IPAddr) []*IPAddr { + var newDest []*IPAddr + if cap(dest) < len(src) { + newDest = make([]*IPAddr, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewIPAddr() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteIPAddr(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewIPAddr() + } + } + for i := range src { + CopyIPAddr(newDest[i], src[i]) + } + return newDest +} + +func (orig *IPAddr) Reset() { + *orig = IPAddr{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *IPAddr) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + + if len(orig.IP) > 0 { + dest.WriteObjectField("iP") + dest.WriteBytes(orig.IP) + } + if orig.Zone != "" { + dest.WriteObjectField("zone") + dest.WriteString(orig.Zone) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
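// Editor's note (annotation, not part of the generated diff): CopyIPAddr above
// assigns dest.IP = src.IP, sharing the byte slice rather than deep-copying it,
// whereas UnmarshalProto below allocates a fresh slice; a caller that mutates
// IP after a copy will therefore see the change through both structs.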
+func (orig *IPAddr) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "iP": + orig.IP = iter.ReadBytes() + case "zone": + orig.Zone = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *IPAddr) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.IP) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.Zone) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *IPAddr) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.IP) + if l > 0 { + pos -= l + copy(buf[pos:], orig.IP) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + l = len(orig.Zone) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Zone) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + return len(buf) - pos +} + +func (orig *IPAddr) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + if length != 0 { + orig.IP = make([]byte, length) + copy(orig.IP, buf[startPos:pos]) + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Zone = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestIPAddr() *IPAddr { + orig := NewIPAddr() + orig.IP = []byte{1, 2, 3} + orig.Zone = "test_zone" + return orig +} + +func GenTestIPAddrPtrSlice() []*IPAddr { + orig := make([]*IPAddr, 5) + orig[0] = NewIPAddr() + orig[1] = GenTestIPAddr() + orig[2] = NewIPAddr() + orig[3] = GenTestIPAddr() + orig[4] = NewIPAddr() + return orig +} + +func GenTestIPAddrSlice() []IPAddr { + orig := make([]IPAddr, 5) + orig[1] = *GenTestIPAddr() + orig[3] = *GenTestIPAddr() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go new file mode 100644 index 00000000000..038cb114d87 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go @@ -0,0 +1,261 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +type KeyValue struct { + Value AnyValue + Key string +} + +var ( + protoPoolKeyValue = sync.Pool{ + New: func() any { + return &KeyValue{} + }, + } +) + +func NewKeyValue() *KeyValue { + if !UseProtoPooling.IsEnabled() { + return &KeyValue{} + } + return protoPoolKeyValue.Get().(*KeyValue) +} + +func DeleteKeyValue(orig *KeyValue, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + DeleteAnyValue(&orig.Value, false) + orig.Reset() + if nullable { + protoPoolKeyValue.Put(orig) + } +} + +func CopyKeyValue(dest, src *KeyValue) *KeyValue { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewKeyValue() + } + dest.Key = src.Key + CopyAnyValue(&dest.Value, &src.Value) + + return dest +} + +func CopyKeyValueSlice(dest, src []KeyValue) []KeyValue { + var newDest []KeyValue + if cap(dest) < len(src) { + newDest = make([]KeyValue, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteKeyValue(&dest[i], false) + } + } + for i := range src { + CopyKeyValue(&newDest[i], &src[i]) + } + return newDest +} + +func CopyKeyValuePtrSlice(dest, src []*KeyValue) []*KeyValue { + var newDest []*KeyValue + if cap(dest) < len(src) { + newDest = make([]*KeyValue, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewKeyValue() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteKeyValue(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewKeyValue() + } + } + for i := range src { + CopyKeyValue(newDest[i], src[i]) + } + return newDest +} + +func (orig *KeyValue) Reset() { + *orig = KeyValue{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *KeyValue) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.Key != "" { + dest.WriteObjectField("key") + dest.WriteString(orig.Key) + } + dest.WriteObjectField("value") + orig.Value.MarshalJSON(dest) + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
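// Editor's note (annotation, not part of the generated diff): unlike the
// scalar fields, Value is treated as always present; MarshalJSON above writes
// the "value" member unconditionally, and the proto size and marshal code
// below always emit field 2, mirroring the embedded non-pointer AnyValue field.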
+func (orig *KeyValue) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "key": + orig.Key = iter.ReadString() + case "value": + + orig.Value.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *KeyValue) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.Key) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + l = orig.Value.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *KeyValue) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.Key) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Key) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + l = orig.Value.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + + return len(buf) - pos +} + +func (orig *KeyValue) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Key = string(buf[startPos:pos]) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Value.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestKeyValue() *KeyValue { + orig := NewKeyValue() + orig.Key = "test_key" + orig.Value = *GenTestAnyValue() + return orig +} + +func GenTestKeyValuePtrSlice() []*KeyValue { + orig := make([]*KeyValue, 5) + orig[0] = NewKeyValue() + orig[1] = GenTestKeyValue() + orig[2] = NewKeyValue() + orig[3] = GenTestKeyValue() + orig[4] = NewKeyValue() + return orig +} + +func GenTestKeyValueSlice() []KeyValue { + orig := make([]KeyValue, 5) + orig[1] = *GenTestKeyValue() + orig[3] = *GenTestKeyValue() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go new file mode 100644 index 00000000000..493ad947fd5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go @@ -0,0 +1,288 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// KeyValueAndUnit represents a custom 'dictionary native' +// style of encoding attributes which is more convenient +// for profiles than opentelemetry.proto.common.v1.KeyValue. 
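The single-byte tags hard-coded in KeyValue.MarshalProto above follow the protobuf key formula key = fieldNumber<<3 | wireType, with wire type 2 (LEN) for length-delimited fields. An illustrative check, not part of the generated code:

const wireTypeLen = 2
fmt.Printf("%#x\n", 1<<3|wireTypeLen) // 0xa: field 1 ("key"), length-delimited
fmt.Printf("%#x\n", 2<<3|wireTypeLen) // 0x12: field 2 ("value"), length-delimited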
+type KeyValueAndUnit struct { + Value AnyValue + KeyStrindex int32 + UnitStrindex int32 +} + +var ( + protoPoolKeyValueAndUnit = sync.Pool{ + New: func() any { + return &KeyValueAndUnit{} + }, + } +) + +func NewKeyValueAndUnit() *KeyValueAndUnit { + if !UseProtoPooling.IsEnabled() { + return &KeyValueAndUnit{} + } + return protoPoolKeyValueAndUnit.Get().(*KeyValueAndUnit) +} + +func DeleteKeyValueAndUnit(orig *KeyValueAndUnit, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + DeleteAnyValue(&orig.Value, false) + + orig.Reset() + if nullable { + protoPoolKeyValueAndUnit.Put(orig) + } +} + +func CopyKeyValueAndUnit(dest, src *KeyValueAndUnit) *KeyValueAndUnit { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewKeyValueAndUnit() + } + dest.KeyStrindex = src.KeyStrindex + CopyAnyValue(&dest.Value, &src.Value) + + dest.UnitStrindex = src.UnitStrindex + + return dest +} + +func CopyKeyValueAndUnitSlice(dest, src []KeyValueAndUnit) []KeyValueAndUnit { + var newDest []KeyValueAndUnit + if cap(dest) < len(src) { + newDest = make([]KeyValueAndUnit, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteKeyValueAndUnit(&dest[i], false) + } + } + for i := range src { + CopyKeyValueAndUnit(&newDest[i], &src[i]) + } + return newDest +} + +func CopyKeyValueAndUnitPtrSlice(dest, src []*KeyValueAndUnit) []*KeyValueAndUnit { + var newDest []*KeyValueAndUnit + if cap(dest) < len(src) { + newDest = make([]*KeyValueAndUnit, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewKeyValueAndUnit() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteKeyValueAndUnit(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewKeyValueAndUnit() + } + } + for i := range src { + CopyKeyValueAndUnit(newDest[i], src[i]) + } + return newDest +} + +func (orig *KeyValueAndUnit) Reset() { + *orig = KeyValueAndUnit{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *KeyValueAndUnit) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.KeyStrindex != int32(0) { + dest.WriteObjectField("keyStrindex") + dest.WriteInt32(orig.KeyStrindex) + } + dest.WriteObjectField("value") + orig.Value.MarshalJSON(dest) + if orig.UnitStrindex != int32(0) { + dest.WriteObjectField("unitStrindex") + dest.WriteInt32(orig.UnitStrindex) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *KeyValueAndUnit) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "keyStrindex", "key_strindex": + orig.KeyStrindex = iter.ReadInt32() + case "value": + + orig.Value.UnmarshalJSON(iter) + case "unitStrindex", "unit_strindex": + orig.UnitStrindex = iter.ReadInt32() + default: + iter.Skip() + } + } +} + +func (orig *KeyValueAndUnit) SizeProto() int { + var n int + var l int + _ = l + if orig.KeyStrindex != int32(0) { + n += 1 + proto.Sov(uint64(orig.KeyStrindex)) + } + l = orig.Value.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + if orig.UnitStrindex != int32(0) { + n += 1 + proto.Sov(uint64(orig.UnitStrindex)) + } + return n +} + +func (orig *KeyValueAndUnit) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.KeyStrindex != int32(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.KeyStrindex)) + pos-- + buf[pos] = 0x8 + } + l = orig.Value.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + + if orig.UnitStrindex != int32(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex)) + pos-- + buf[pos] = 0x18 + } + return len(buf) - pos +} + +func (orig *KeyValueAndUnit) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.KeyStrindex = int32(num) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Value.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.UnitStrindex = int32(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestKeyValueAndUnit() *KeyValueAndUnit { + orig := NewKeyValueAndUnit() + orig.KeyStrindex = int32(13) + orig.Value = *GenTestAnyValue() + orig.UnitStrindex = int32(13) + return orig +} + +func GenTestKeyValueAndUnitPtrSlice() []*KeyValueAndUnit { + orig := make([]*KeyValueAndUnit, 5) + orig[0] = NewKeyValueAndUnit() + orig[1] = GenTestKeyValueAndUnit() + orig[2] = NewKeyValueAndUnit() + orig[3] = GenTestKeyValueAndUnit() + orig[4] = NewKeyValueAndUnit() + return orig +} + +func GenTestKeyValueAndUnitSlice() []KeyValueAndUnit { + orig := make([]KeyValueAndUnit, 5) + orig[1] = *GenTestKeyValueAndUnit() + orig[3] = *GenTestKeyValueAndUnit() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go new file mode 100644 index 00000000000..9e01d808a20 --- /dev/null +++ 
b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message since oneof in AnyValue does not allow repeated fields. +type KeyValueList struct { + Values []KeyValue +} + +var ( + protoPoolKeyValueList = sync.Pool{ + New: func() any { + return &KeyValueList{} + }, + } +) + +func NewKeyValueList() *KeyValueList { + if !UseProtoPooling.IsEnabled() { + return &KeyValueList{} + } + return protoPoolKeyValueList.Get().(*KeyValueList) +} + +func DeleteKeyValueList(orig *KeyValueList, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.Values { + DeleteKeyValue(&orig.Values[i], false) + } + orig.Reset() + if nullable { + protoPoolKeyValueList.Put(orig) + } +} + +func CopyKeyValueList(dest, src *KeyValueList) *KeyValueList { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewKeyValueList() + } + dest.Values = CopyKeyValueSlice(dest.Values, src.Values) + + return dest +} + +func CopyKeyValueListSlice(dest, src []KeyValueList) []KeyValueList { + var newDest []KeyValueList + if cap(dest) < len(src) { + newDest = make([]KeyValueList, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteKeyValueList(&dest[i], false) + } + } + for i := range src { + CopyKeyValueList(&newDest[i], &src[i]) + } + return newDest +} + +func CopyKeyValueListPtrSlice(dest, src []*KeyValueList) []*KeyValueList { + var newDest []*KeyValueList + if cap(dest) < len(src) { + newDest = make([]*KeyValueList, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewKeyValueList() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteKeyValueList(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewKeyValueList() + } + } + for i := range src { + CopyKeyValueList(newDest[i], src[i]) + } + return newDest +} + +func (orig *KeyValueList) Reset() { + *orig = KeyValueList{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
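The pointer-slice copy helper above encodes a reuse policy: on shrink, surplus elements are deleted (returned to the pool when pooling is enabled) and nil'ed out so the backing array stops pinning them; on grow, surviving pointers are reused and only the missing tail is allocated. A sketch of the shrink path, written as if inside this internal package:

dst := []*KeyValueList{NewKeyValueList(), NewKeyValueList(), NewKeyValueList()}
src := []*KeyValueList{GenTestKeyValueList()}
dst = CopyKeyValueListPtrSlice(dst, src)
// len(dst) == 1; the two surplus elements were passed to DeleteKeyValueList
// and set to nil in the old backing array.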
+func (orig *KeyValueList) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.Values) > 0 { + dest.WriteObjectField("values") + dest.WriteArrayStart() + orig.Values[0].MarshalJSON(dest) + for i := 1; i < len(orig.Values); i++ { + dest.WriteMore() + orig.Values[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *KeyValueList) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "values": + for iter.ReadArray() { + orig.Values = append(orig.Values, KeyValue{}) + orig.Values[len(orig.Values)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *KeyValueList) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.Values { + l = orig.Values[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *KeyValueList) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.Values) - 1; i >= 0; i-- { + l = orig.Values[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *KeyValueList) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Values = append(orig.Values, KeyValue{}) + err = orig.Values[len(orig.Values)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestKeyValueList() *KeyValueList { + orig := NewKeyValueList() + orig.Values = []KeyValue{{}, *GenTestKeyValue()} + return orig +} + +func GenTestKeyValueListPtrSlice() []*KeyValueList { + orig := make([]*KeyValueList, 5) + orig[0] = NewKeyValueList() + orig[1] = GenTestKeyValueList() + orig[2] = NewKeyValueList() + orig[3] = GenTestKeyValueList() + orig[4] = NewKeyValueList() + return orig +} + +func GenTestKeyValueListSlice() []KeyValueList { + orig := make([]KeyValueList, 5) + orig[1] = *GenTestKeyValueList() + orig[3] = *GenTestKeyValueList() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_line.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go similarity index 54% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_line.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go index 4b455cfb17d..f6db6871bb3 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_line.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go @@ -10,27 +10,33 @@ import ( "fmt" "sync" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" 
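KeyValueList.MarshalProto above walks Values from the last element to the first. That is deliberate: because the encoder writes toward the front of the buffer, emitting element N first leaves it at the tail and element 0 ends up frontmost, so repeated fields keep ascending order on the wire. The same inversion in miniature:

buf := make([]byte, 3)
pos := len(buf)
for _, b := range []byte{'c', 'b', 'a'} { // logical order a, b, c, visited in reverse
	pos--
	buf[pos] = b
}
// buf[pos:] now reads "abc": tail-first writing restores the original order.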
"go.opentelemetry.io/collector/pdata/internal/proto" ) +// Line details a specific line in a source code, linked to a function. +type Line struct { + FunctionIndex int32 + Line int64 + Column int64 +} + var ( protoPoolLine = sync.Pool{ New: func() any { - return &otlpprofiles.Line{} + return &Line{} }, } ) -func NewOrigLine() *otlpprofiles.Line { +func NewLine() *Line { if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.Line{} + return &Line{} } - return protoPoolLine.Get().(*otlpprofiles.Line) + return protoPoolLine.Get().(*Line) } -func DeleteOrigLine(orig *otlpprofiles.Line, nullable bool) { +func DeleteLine(orig *Line, nullable bool) { if orig == nil { return } @@ -46,26 +52,80 @@ func DeleteOrigLine(orig *otlpprofiles.Line, nullable bool) { } } -func CopyOrigLine(dest, src *otlpprofiles.Line) { +func CopyLine(dest, src *Line) *Line { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewLine() } dest.FunctionIndex = src.FunctionIndex dest.Line = src.Line dest.Column = src.Column + + return dest } -func GenTestOrigLine() *otlpprofiles.Line { - orig := NewOrigLine() - orig.FunctionIndex = int32(13) - orig.Line = int64(13) - orig.Column = int64(13) - return orig +func CopyLineSlice(dest, src []Line) []Line { + var newDest []Line + if cap(dest) < len(src) { + newDest = make([]Line, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLine(&dest[i], false) + } + } + for i := range src { + CopyLine(&newDest[i], &src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigLine(orig *otlpprofiles.Line, dest *json.Stream) { +func CopyLinePtrSlice(dest, src []*Line) []*Line { + var newDest []*Line + if cap(dest) < len(src) { + newDest = make([]*Line, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLine() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLine(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLine() + } + } + for i := range src { + CopyLine(newDest[i], src[i]) + } + return newDest +} + +func (orig *Line) Reset() { + *orig = Line{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Line) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if orig.FunctionIndex != int32(0) { dest.WriteObjectField("functionIndex") @@ -82,8 +142,8 @@ func MarshalJSONOrigLine(orig *otlpprofiles.Line, dest *json.Stream) { dest.WriteObjectEnd() } -// UnmarshalJSONOrigLine unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigLine(orig *otlpprofiles.Line, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *Line) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "functionIndex", "function_index": @@ -98,37 +158,37 @@ func UnmarshalJSONOrigLine(orig *otlpprofiles.Line, iter *json.Iterator) { } } -func SizeProtoOrigLine(orig *otlpprofiles.Line) int { +func (orig *Line) SizeProto() int { var n int var l int _ = l - if orig.FunctionIndex != 0 { + if orig.FunctionIndex != int32(0) { n += 1 + proto.Sov(uint64(orig.FunctionIndex)) } - if orig.Line != 0 { + if orig.Line != int64(0) { n += 1 + proto.Sov(uint64(orig.Line)) } - if orig.Column != 0 { + if orig.Column != int64(0) { n += 1 + proto.Sov(uint64(orig.Column)) } return n } -func MarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) int { +func (orig *Line) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - if orig.FunctionIndex != 0 { + if orig.FunctionIndex != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.FunctionIndex)) pos-- buf[pos] = 0x8 } - if orig.Line != 0 { + if orig.Line != int64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Line)) pos-- buf[pos] = 0x10 } - if orig.Column != 0 { + if orig.Column != int64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Column)) pos-- buf[pos] = 0x18 @@ -136,7 +196,7 @@ func MarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) int { return len(buf) - pos } -func UnmarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) error { +func (orig *Line) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -160,7 +220,6 @@ func UnmarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) error { if err != nil { return err } - orig.FunctionIndex = int32(num) case 2: @@ -172,7 +231,6 @@ func UnmarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) error { if err != nil { return err } - orig.Line = int64(num) case 3: @@ -184,7 +242,6 @@ func UnmarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) error { if err != nil { return err } - orig.Column = int64(num) default: pos, err = proto.ConsumeUnknown(buf, pos, wireType) @@ -195,3 +252,28 @@ func UnmarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) error { } return nil } + +func GenTestLine() *Line { + orig := NewLine() + orig.FunctionIndex = int32(13) + orig.Line = int64(13) + orig.Column = int64(13) + return orig +} + +func GenTestLinePtrSlice() []*Line { + orig := make([]*Line, 5) + orig[0] = NewLine() + orig[1] = GenTestLine() + orig[2] = NewLine() + orig[3] = GenTestLine() + orig[4] = NewLine() + return orig +} + +func GenTestLineSlice() []Line { + orig := make([]Line, 5) + orig[1] = *GenTestLine() + orig[3] = *GenTestLine() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go new file mode 100644 index 00000000000..6df8aa77f84 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go @@ -0,0 +1,265 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Link represents a pointer from a profile Sample to a trace Span. 
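One note on the Line codec above before the Link message continues: proto int32/int64 scalars use plain (non-zigzag) varints, which is why the generated code converts through uint64. A negative value sign-extends and always encodes to the maximum 10 bytes. Illustrative sizes via the internal proto.Sov helper, assuming it follows standard varint sizing as its uses throughout this diff suggest:

fmt.Println(proto.Sov(uint64(int64(1))))  // 1 byte
fmt.Println(proto.Sov(uint64(int64(-1)))) // 10 bytes: 0xffffffffffffffff after sign extension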
+type Link struct { + TraceId TraceID + SpanId SpanID +} + +var ( + protoPoolLink = sync.Pool{ + New: func() any { + return &Link{} + }, + } +) + +func NewLink() *Link { + if !UseProtoPooling.IsEnabled() { + return &Link{} + } + return protoPoolLink.Get().(*Link) +} + +func DeleteLink(orig *Link, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteTraceID(&orig.TraceId, false) + DeleteSpanID(&orig.SpanId, false) + orig.Reset() + if nullable { + protoPoolLink.Put(orig) + } +} + +func CopyLink(dest, src *Link) *Link { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewLink() + } + CopyTraceID(&dest.TraceId, &src.TraceId) + + CopySpanID(&dest.SpanId, &src.SpanId) + + return dest +} + +func CopyLinkSlice(dest, src []Link) []Link { + var newDest []Link + if cap(dest) < len(src) { + newDest = make([]Link, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLink(&dest[i], false) + } + } + for i := range src { + CopyLink(&newDest[i], &src[i]) + } + return newDest +} + +func CopyLinkPtrSlice(dest, src []*Link) []*Link { + var newDest []*Link + if cap(dest) < len(src) { + newDest = make([]*Link, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLink() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLink(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLink() + } + } + for i := range src { + CopyLink(newDest[i], src[i]) + } + return newDest +} + +func (orig *Link) Reset() { + *orig = Link{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Link) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if !orig.TraceId.IsEmpty() { + dest.WriteObjectField("traceId") + orig.TraceId.MarshalJSON(dest) + } + if !orig.SpanId.IsEmpty() { + dest.WriteObjectField("spanId") + orig.SpanId.MarshalJSON(dest) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *Link) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "traceId", "trace_id": + + orig.TraceId.UnmarshalJSON(iter) + case "spanId", "span_id": + + orig.SpanId.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *Link) SizeProto() int { + var n int + var l int + _ = l + l = orig.TraceId.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + l = orig.SpanId.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *Link) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.TraceId.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + l = orig.SpanId.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + + return len(buf) - pos +} + +func (orig *Link) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.TraceId.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.SpanId.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestLink() *Link { + orig := NewLink() + orig.TraceId = *GenTestTraceID() + orig.SpanId = *GenTestSpanID() + return orig +} + +func GenTestLinkPtrSlice() []*Link { + orig := make([]*Link, 5) + orig[0] = NewLink() + orig[1] = GenTestLink() + orig[2] = NewLink() + orig[3] = GenTestLink() + orig[4] = NewLink() + return orig +} + +func GenTestLinkSlice() []Link { + orig := make([]Link, 5) + orig[1] = *GenTestLink() + orig[3] = *GenTestLink() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_location.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go similarity index 56% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_location.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go index f36da20a6e2..a9eebb1f66c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_location.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go @@ -10,27 +10,34 @@ import ( "fmt" "sync" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// Location describes function and line table debug information. 
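Looking back at the Link codec above before the Location message continues: TraceId and SpanId are both encoded unconditionally as nested length-delimited messages, so a marshal/unmarshal round-trip is total even for empty IDs. A hedged sketch, written as if inside this internal package:

link := GenTestLink()
buf := make([]byte, link.SizeProto())
link.MarshalProto(buf) // fills buf exactly, since it was sized by SizeProto

var out Link
if err := out.UnmarshalProto(buf); err != nil {
	panic(err) // illustrative only; real callers propagate the error
}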
+type Location struct { + Lines []*Line + AttributeIndices []int32 + Address uint64 + MappingIndex int32 +} + var ( protoPoolLocation = sync.Pool{ New: func() any { - return &otlpprofiles.Location{} + return &Location{} }, } ) -func NewOrigLocation() *otlpprofiles.Location { +func NewLocation() *Location { if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.Location{} + return &Location{} } - return protoPoolLocation.Get().(*otlpprofiles.Location) + return protoPoolLocation.Get().(*Location) } -func DeleteOrigLocation(orig *otlpprofiles.Location, nullable bool) { +func DeleteLocation(orig *Location, nullable bool) { if orig == nil { return } @@ -40,8 +47,8 @@ func DeleteOrigLocation(orig *otlpprofiles.Location, nullable bool) { return } - for i := range orig.Line { - DeleteOrigLine(orig.Line[i], true) + for i := range orig.Lines { + DeleteLine(orig.Lines[i], true) } orig.Reset() @@ -50,28 +57,82 @@ func DeleteOrigLocation(orig *otlpprofiles.Location, nullable bool) { } } -func CopyOrigLocation(dest, src *otlpprofiles.Location) { +func CopyLocation(dest, src *Location) *Location { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewLocation() } dest.MappingIndex = src.MappingIndex dest.Address = src.Address - dest.Line = CopyOrigLineSlice(dest.Line, src.Line) - dest.AttributeIndices = CopyOrigInt32Slice(dest.AttributeIndices, src.AttributeIndices) + dest.Lines = CopyLinePtrSlice(dest.Lines, src.Lines) + + dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...) + + return dest } -func GenTestOrigLocation() *otlpprofiles.Location { - orig := NewOrigLocation() - orig.MappingIndex = int32(13) - orig.Address = uint64(13) - orig.Line = GenerateOrigTestLineSlice() - orig.AttributeIndices = GenerateOrigTestInt32Slice() - return orig +func CopyLocationSlice(dest, src []Location) []Location { + var newDest []Location + if cap(dest) < len(src) { + newDest = make([]Location, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLocation(&dest[i], false) + } + } + for i := range src { + CopyLocation(&newDest[i], &src[i]) + } + return newDest +} + +func CopyLocationPtrSlice(dest, src []*Location) []*Location { + var newDest []*Location + if cap(dest) < len(src) { + newDest = make([]*Location, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLocation() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLocation(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLocation() + } + } + for i := range src { + CopyLocation(newDest[i], src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
-func MarshalJSONOrigLocation(orig *otlpprofiles.Location, dest *json.Stream) { +func (orig *Location) Reset() { + *orig = Location{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Location) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if orig.MappingIndex != int32(0) { dest.WriteObjectField("mappingIndex") @@ -81,13 +142,13 @@ func MarshalJSONOrigLocation(orig *otlpprofiles.Location, dest *json.Stream) { dest.WriteObjectField("address") dest.WriteUint64(orig.Address) } - if len(orig.Line) > 0 { - dest.WriteObjectField("line") + if len(orig.Lines) > 0 { + dest.WriteObjectField("lines") dest.WriteArrayStart() - MarshalJSONOrigLine(orig.Line[0], dest) - for i := 1; i < len(orig.Line); i++ { + orig.Lines[0].MarshalJSON(dest) + for i := 1; i < len(orig.Lines); i++ { dest.WriteMore() - MarshalJSONOrigLine(orig.Line[i], dest) + orig.Lines[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -101,21 +162,22 @@ func MarshalJSONOrigLocation(orig *otlpprofiles.Location, dest *json.Stream) { } dest.WriteArrayEnd() } + dest.WriteObjectEnd() } -// UnmarshalJSONOrigLocation unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigLocation(orig *otlpprofiles.Location, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Location) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "mappingIndex", "mapping_index": orig.MappingIndex = iter.ReadInt32() case "address": orig.Address = iter.ReadUint64() - case "line": + case "lines": for iter.ReadArray() { - orig.Line = append(orig.Line, NewOrigLine()) - UnmarshalJSONOrigLine(orig.Line[len(orig.Line)-1], iter) + orig.Lines = append(orig.Lines, NewLine()) + orig.Lines[len(orig.Lines)-1].UnmarshalJSON(iter) } case "attributeIndices", "attribute_indices": @@ -129,20 +191,21 @@ func UnmarshalJSONOrigLocation(orig *otlpprofiles.Location, iter *json.Iterator) } } -func SizeProtoOrigLocation(orig *otlpprofiles.Location) int { +func (orig *Location) SizeProto() int { var n int var l int _ = l - if orig.MappingIndex != 0 { + if orig.MappingIndex != int32(0) { n += 1 + proto.Sov(uint64(orig.MappingIndex)) } - if orig.Address != 0 { + if orig.Address != uint64(0) { n += 1 + proto.Sov(uint64(orig.Address)) } - for i := range orig.Line { - l = SizeProtoOrigLine(orig.Line[i]) + for i := range orig.Lines { + l = orig.Lines[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } + if len(orig.AttributeIndices) > 0 { l = 0 for _, e := range orig.AttributeIndices { @@ -153,22 +216,22 @@ func SizeProtoOrigLocation(orig *otlpprofiles.Location) int { return n } -func MarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) int { +func (orig *Location) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - if orig.MappingIndex != 0 { + if orig.MappingIndex != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.MappingIndex)) pos-- buf[pos] = 0x8 } - if orig.Address != 0 { + if orig.Address != uint64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Address)) pos-- buf[pos] = 0x10 } - for i := len(orig.Line) - 1; i >= 0; i-- { - l = MarshalProtoOrigLine(orig.Line[i], buf[:pos]) + for i := len(orig.Lines) - 1; i >= 0; i-- { + l = orig.Lines[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- @@ -187,7 +250,7 @@ func MarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) 
int { return len(buf) - pos } -func UnmarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) error { +func (orig *Location) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -211,7 +274,6 @@ func UnmarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) error { if err != nil { return err } - orig.MappingIndex = int32(num) case 2: @@ -223,12 +285,11 @@ func UnmarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) error { if err != nil { return err } - orig.Address = uint64(num) case 3: if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lines", wireType) } var length int length, pos, err = proto.ConsumeLen(buf, pos) @@ -236,8 +297,8 @@ func UnmarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) error { return err } startPos := pos - length - orig.Line = append(orig.Line, NewOrigLine()) - err = UnmarshalProtoOrigLine(orig.Line[len(orig.Line)-1], buf[startPos:pos]) + orig.Lines = append(orig.Lines, NewLine()) + err = orig.Lines[len(orig.Lines)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -280,3 +341,29 @@ func UnmarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) error { } return nil } + +func GenTestLocation() *Location { + orig := NewLocation() + orig.MappingIndex = int32(13) + orig.Address = uint64(13) + orig.Lines = []*Line{{}, GenTestLine()} + orig.AttributeIndices = []int32{int32(0), int32(13)} + return orig +} + +func GenTestLocationPtrSlice() []*Location { + orig := make([]*Location, 5) + orig[0] = NewLocation() + orig[1] = GenTestLocation() + orig[2] = NewLocation() + orig[3] = GenTestLocation() + orig[4] = NewLocation() + return orig +} + +func GenTestLocationSlice() []Location { + orig := make([]Location, 5) + orig[1] = *GenTestLocation() + orig[3] = *GenTestLocation() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecord.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go similarity index 61% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecord.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go index a43fdb38bfd..6d5057b5725 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecord.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go @@ -11,29 +11,42 @@ import ( "fmt" "sync" - "go.opentelemetry.io/collector/pdata/internal/data" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.
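The LogRecord codec that follows mixes every wire type used in this diff: varint, fixed64, fixed32, and length-delimited. Its hard-coded tag bytes decode with the same key formula noted earlier; illustrative arithmetic, not generated code:

// key = fieldNumber<<3 | wireType (0 varint, 1 fixed64, 2 len, 5 fixed32)
fmt.Printf("%#x\n", 1<<3|1)  // 0x09: TimeUnixNano, fixed64
fmt.Printf("%#x\n", 11<<3|1) // 0x59: ObservedTimeUnixNano, fixed64
fmt.Printf("%#x\n", 2<<3|0)  // 0x10: SeverityNumber, varint
fmt.Printf("%#x\n", 8<<3|5)  // 0x45: Flags, fixed32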
+ +type LogRecord struct { + Body AnyValue + SeverityText string + EventName string + Attributes []KeyValue + TimeUnixNano uint64 + ObservedTimeUnixNano uint64 + SeverityNumber SeverityNumber + DroppedAttributesCount uint32 + Flags uint32 + TraceId TraceID + SpanId SpanID +} + var ( protoPoolLogRecord = sync.Pool{ New: func() any { - return &otlplogs.LogRecord{} + return &LogRecord{} }, } ) -func NewOrigLogRecord() *otlplogs.LogRecord { +func NewLogRecord() *LogRecord { if !UseProtoPooling.IsEnabled() { - return &otlplogs.LogRecord{} + return &LogRecord{} } - return protoPoolLogRecord.Get().(*otlplogs.LogRecord) + return protoPoolLogRecord.Get().(*LogRecord) } -func DeleteOrigLogRecord(orig *otlplogs.LogRecord, nullable bool) { +func DeleteLogRecord(orig *LogRecord, nullable bool) { if orig == nil { return } @@ -43,12 +56,13 @@ func DeleteOrigLogRecord(orig *otlplogs.LogRecord, nullable bool) { return } - DeleteOrigAnyValue(&orig.Body, false) + DeleteAnyValue(&orig.Body, false) for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) + DeleteKeyValue(&orig.Attributes[i], false) } - DeleteOrigTraceID(&orig.TraceId, false) - DeleteOrigSpanID(&orig.SpanId, false) + + DeleteTraceID(&orig.TraceId, false) + DeleteSpanID(&orig.SpanId, false) orig.Reset() if nullable { @@ -56,42 +70,92 @@ func DeleteOrigLogRecord(orig *otlplogs.LogRecord, nullable bool) { } } -func CopyOrigLogRecord(dest, src *otlplogs.LogRecord) { +func CopyLogRecord(dest, src *LogRecord) *LogRecord { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewLogRecord() } dest.TimeUnixNano = src.TimeUnixNano dest.ObservedTimeUnixNano = src.ObservedTimeUnixNano dest.SeverityNumber = src.SeverityNumber dest.SeverityText = src.SeverityText - CopyOrigAnyValue(&dest.Body, &src.Body) - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) + CopyAnyValue(&dest.Body, &src.Body) + + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + dest.DroppedAttributesCount = src.DroppedAttributesCount dest.Flags = src.Flags - dest.TraceId = src.TraceId - dest.SpanId = src.SpanId + CopyTraceID(&dest.TraceId, &src.TraceId) + + CopySpanID(&dest.SpanId, &src.SpanId) + dest.EventName = src.EventName + + return dest } -func GenTestOrigLogRecord() *otlplogs.LogRecord { - orig := NewOrigLogRecord() - orig.TimeUnixNano = 1234567890 - orig.ObservedTimeUnixNano = 1234567890 - orig.SeverityNumber = otlplogs.SeverityNumber(5) - orig.SeverityText = "test_severitytext" - orig.Body = *GenTestOrigAnyValue() - orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.DroppedAttributesCount = uint32(13) - orig.Flags = 1 - orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) - orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) - orig.EventName = "test_eventname" - return orig +func CopyLogRecordSlice(dest, src []LogRecord) []LogRecord { + var newDest []LogRecord + if cap(dest) < len(src) { + newDest = make([]LogRecord, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
+ for i := len(src); i < len(dest); i++ { + DeleteLogRecord(&dest[i], false) + } + } + for i := range src { + CopyLogRecord(&newDest[i], &src[i]) + } + return newDest +} + +func CopyLogRecordPtrSlice(dest, src []*LogRecord) []*LogRecord { + var newDest []*LogRecord + if cap(dest) < len(src) { + newDest = make([]*LogRecord, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLogRecord() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLogRecord(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLogRecord() + } + } + for i := range src { + CopyLogRecord(newDest[i], src[i]) + } + return newDest +} + +func (orig *LogRecord) Reset() { + *orig = LogRecord{} } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) { +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *LogRecord) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if orig.TimeUnixNano != uint64(0) { dest.WriteObjectField("timeUnixNano") @@ -111,14 +175,14 @@ func MarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) { dest.WriteString(orig.SeverityText) } dest.WriteObjectField("body") - MarshalJSONOrigAnyValue(&orig.Body, dest) + orig.Body.MarshalJSON(dest) if len(orig.Attributes) > 0 { dest.WriteObjectField("attributes") dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Attributes[0], dest) + orig.Attributes[0].MarshalJSON(dest) for i := 1; i < len(orig.Attributes); i++ { dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Attributes[i], dest) + orig.Attributes[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -130,13 +194,13 @@ func MarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) { dest.WriteObjectField("flags") dest.WriteUint32(orig.Flags) } - if orig.TraceId != data.TraceID([16]byte{}) { + if !orig.TraceId.IsEmpty() { dest.WriteObjectField("traceId") - MarshalJSONOrigTraceID(&orig.TraceId, dest) + orig.TraceId.MarshalJSON(dest) } - if orig.SpanId != data.SpanID([8]byte{}) { + if !orig.SpanId.IsEmpty() { dest.WriteObjectField("spanId") - MarshalJSONOrigSpanID(&orig.SpanId, dest) + orig.SpanId.MarshalJSON(dest) } if orig.EventName != "" { dest.WriteObjectField("eventName") @@ -145,8 +209,8 @@ func MarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) { dest.WriteObjectEnd() } -// UnmarshalJSONOrigLogRecord unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *LogRecord) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "timeUnixNano", "time_unix_nano": @@ -154,15 +218,16 @@ func UnmarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) { case "observedTimeUnixNano", "observed_time_unix_nano": orig.ObservedTimeUnixNano = iter.ReadUint64() case "severityNumber", "severity_number": - orig.SeverityNumber = otlplogs.SeverityNumber(iter.ReadEnumValue(otlplogs.SeverityNumber_value)) + orig.SeverityNumber = SeverityNumber(iter.ReadEnumValue(SeverityNumber_value)) case "severityText", "severity_text": orig.SeverityText = iter.ReadString() case "body": - UnmarshalJSONOrigAnyValue(&orig.Body, iter) + + orig.Body.UnmarshalJSON(iter) case "attributes": for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) } case "droppedAttributesCount", "dropped_attributes_count": @@ -170,9 +235,11 @@ func UnmarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) { case "flags": orig.Flags = iter.ReadUint32() case "traceId", "trace_id": - UnmarshalJSONOrigTraceID(&orig.TraceId, iter) + + orig.TraceId.UnmarshalJSON(iter) case "spanId", "span_id": - UnmarshalJSONOrigSpanID(&orig.SpanId, iter) + + orig.SpanId.UnmarshalJSON(iter) case "eventName", "event_name": orig.EventName = iter.ReadString() default: @@ -181,39 +248,41 @@ func UnmarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) { } } -func SizeProtoOrigLogRecord(orig *otlplogs.LogRecord) int { +func (orig *LogRecord) SizeProto() int { var n int var l int _ = l - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { n += 9 } - if orig.ObservedTimeUnixNano != 0 { + if orig.ObservedTimeUnixNano != uint64(0) { n += 9 } - if orig.SeverityNumber != 0 { + if orig.SeverityNumber != SeverityNumber(0) { n += 1 + proto.Sov(uint64(orig.SeverityNumber)) } + l = len(orig.SeverityText) if l > 0 { n += 1 + proto.Sov(uint64(l)) + l } - l = SizeProtoOrigAnyValue(&orig.Body) + l = orig.Body.SizeProto() n += 1 + proto.Sov(uint64(l)) + l for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) + l = orig.Attributes[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount)) } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { n += 5 } - l = SizeProtoOrigTraceID(&orig.TraceId) + l = orig.TraceId.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - l = SizeProtoOrigSpanID(&orig.SpanId) + l = orig.SpanId.SizeProto() n += 1 + proto.Sov(uint64(l)) + l + l = len(orig.EventName) if l > 0 { n += 1 + proto.Sov(uint64(l)) + l @@ -221,23 +290,23 @@ func SizeProtoOrigLogRecord(orig *otlplogs.LogRecord) int { return n } -func MarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) int { +func (orig *LogRecord) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) pos-- buf[pos] = 0x9 } - if orig.ObservedTimeUnixNano != 0 { + if orig.ObservedTimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.ObservedTimeUnixNano)) pos-- buf[pos] = 0x59 } - 
if orig.SeverityNumber != 0 { + if orig.SeverityNumber != SeverityNumber(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.SeverityNumber)) pos-- buf[pos] = 0x10 @@ -250,39 +319,37 @@ func MarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) int { pos-- buf[pos] = 0x1a } - - l = MarshalProtoOrigAnyValue(&orig.Body, buf[:pos]) + l = orig.Body.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x2a for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) + l = orig.Attributes[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x32 } - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount)) pos-- buf[pos] = 0x38 } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { pos -= 4 binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags)) pos-- buf[pos] = 0x45 } - - l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos]) + l = orig.TraceId.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x4a - l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos]) + l = orig.SpanId.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- @@ -299,7 +366,7 @@ func MarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) int { return len(buf) - pos } -func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error { +func (orig *LogRecord) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -347,8 +414,7 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error { if err != nil { return err } - - orig.SeverityNumber = otlplogs.SeverityNumber(num) + orig.SeverityNumber = SeverityNumber(num) case 3: if wireType != proto.WireTypeLen { @@ -373,7 +439,7 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigAnyValue(&orig.Body, buf[startPos:pos]) + err = orig.Body.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -388,8 +454,8 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error { return err } startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -403,7 +469,6 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error { if err != nil { return err } - orig.DroppedAttributesCount = uint32(num) case 8: @@ -429,7 +494,7 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos]) + err = orig.TraceId.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -445,7 +510,7 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos]) + err = orig.SpanId.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -470,3 +535,36 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error { } return nil } + +func 
GenTestLogRecord() *LogRecord { + orig := NewLogRecord() + orig.TimeUnixNano = uint64(13) + orig.ObservedTimeUnixNano = uint64(13) + orig.SeverityNumber = SeverityNumber(13) + orig.SeverityText = "test_severitytext" + orig.Body = *GenTestAnyValue() + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.DroppedAttributesCount = uint32(13) + orig.Flags = uint32(13) + orig.TraceId = *GenTestTraceID() + orig.SpanId = *GenTestSpanID() + orig.EventName = "test_eventname" + return orig +} + +func GenTestLogRecordPtrSlice() []*LogRecord { + orig := make([]*LogRecord, 5) + orig[0] = NewLogRecord() + orig[1] = GenTestLogRecord() + orig[2] = NewLogRecord() + orig[3] = GenTestLogRecord() + orig[4] = NewLogRecord() + return orig +} + +func GenTestLogRecordSlice() []LogRecord { + orig := make([]LogRecord, 5) + orig[1] = *GenTestLogRecord() + orig[3] = *GenTestLogRecord() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go new file mode 100644 index 00000000000..747684f1c6b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go @@ -0,0 +1,245 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// LogsData represents the logs data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP logs data but do not +// implement the OTLP protocol. +type LogsData struct { + ResourceLogs []*ResourceLogs +} + +var ( + protoPoolLogsData = sync.Pool{ + New: func() any { + return &LogsData{} + }, + } +) + +func NewLogsData() *LogsData { + if !UseProtoPooling.IsEnabled() { + return &LogsData{} + } + return protoPoolLogsData.Get().(*LogsData) +} + +func DeleteLogsData(orig *LogsData, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.ResourceLogs { + DeleteResourceLogs(orig.ResourceLogs[i], true) + } + orig.Reset() + if nullable { + protoPoolLogsData.Put(orig) + } +} + +func CopyLogsData(dest, src *LogsData) *LogsData { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewLogsData() + } + dest.ResourceLogs = CopyResourceLogsPtrSlice(dest.ResourceLogs, src.ResourceLogs) + + return dest +} + +func CopyLogsDataSlice(dest, src []LogsData) []LogsData { + var newDest []LogsData + if cap(dest) < len(src) { + newDest = make([]LogsData, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLogsData(&dest[i], false) + } + } + for i := range src { + CopyLogsData(&newDest[i], &src[i]) + } + return newDest +} + +func CopyLogsDataPtrSlice(dest, src []*LogsData) []*LogsData { + var newDest []*LogsData + if cap(dest) < len(src) { + newDest = make([]*LogsData, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). 
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewLogsData() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLogsData(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLogsData() + } + } + for i := range src { + CopyLogsData(newDest[i], src[i]) + } + return newDest +} + +func (orig *LogsData) Reset() { + *orig = LogsData{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *LogsData) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.ResourceLogs) > 0 { + dest.WriteObjectField("resourceLogs") + dest.WriteArrayStart() + orig.ResourceLogs[0].MarshalJSON(dest) + for i := 1; i < len(orig.ResourceLogs); i++ { + dest.WriteMore() + orig.ResourceLogs[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *LogsData) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resourceLogs", "resource_logs": + for iter.ReadArray() { + orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs()) + orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *LogsData) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.ResourceLogs { + l = orig.ResourceLogs[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *LogsData) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.ResourceLogs) - 1; i >= 0; i-- { + l = orig.ResourceLogs[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *LogsData) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs()) + err = orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestLogsData() *LogsData { + orig := NewLogsData() + orig.ResourceLogs = []*ResourceLogs{{}, GenTestResourceLogs()} + return orig +} + +func GenTestLogsDataPtrSlice() []*LogsData { + orig := make([]*LogsData, 5) + orig[0] = NewLogsData() + orig[1] = GenTestLogsData() + orig[2] = NewLogsData() + orig[3] = GenTestLogsData() + orig[4] = NewLogsData() + return orig +} + +func GenTestLogsDataSlice() []LogsData { + orig := make([]LogsData, 5) + orig[1] = *GenTestLogsData() + orig[3] = *GenTestLogsData() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go new file mode 100644 index 00000000000..94b915df049 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go @@ -0,0 +1,299 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "encoding/binary" + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +type LogsRequest struct { + RequestContext *RequestContext + LogsData LogsData + FormatVersion uint32 +} + +var ( + protoPoolLogsRequest = sync.Pool{ + New: func() any { + return &LogsRequest{} + }, + } +) + +func NewLogsRequest() *LogsRequest { + if !UseProtoPooling.IsEnabled() { + return &LogsRequest{} + } + return protoPoolLogsRequest.Get().(*LogsRequest) +} + +func DeleteLogsRequest(orig *LogsRequest, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteRequestContext(orig.RequestContext, true) + DeleteLogsData(&orig.LogsData, false) + + orig.Reset() + if nullable { + protoPoolLogsRequest.Put(orig) + } +} + +func CopyLogsRequest(dest, src *LogsRequest) *LogsRequest { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewLogsRequest() + } + dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext) + + CopyLogsData(&dest.LogsData, &src.LogsData) + + dest.FormatVersion = src.FormatVersion + + return dest +} + +func CopyLogsRequestSlice(dest, src []LogsRequest) []LogsRequest { + var newDest []LogsRequest + if cap(dest) < len(src) { + newDest = make([]LogsRequest, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
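Editor's note: every generated `UnmarshalProto` drives the same loop: read a varint tag, split it into field number and wire type, then dispatch. A minimal illustration of the split that `proto.ConsumeTag` (an internal helper) performs for a single-byte tag:

```go
package main

import "fmt"

func main() {
	tag := byte(0x1a) // as written by LogsRequest.MarshalProto for LogsData
	fieldNum := tag >> 3
	wireType := tag & 0x7
	fmt.Println(fieldNum, wireType) // 3 2 (field 3, length-delimited)
}
```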
+ for i := len(src); i < len(dest); i++ { + DeleteLogsRequest(&dest[i], false) + } + } + for i := range src { + CopyLogsRequest(&newDest[i], &src[i]) + } + return newDest +} + +func CopyLogsRequestPtrSlice(dest, src []*LogsRequest) []*LogsRequest { + var newDest []*LogsRequest + if cap(dest) < len(src) { + newDest = make([]*LogsRequest, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLogsRequest() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteLogsRequest(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewLogsRequest() + } + } + for i := range src { + CopyLogsRequest(newDest[i], src[i]) + } + return newDest +} + +func (orig *LogsRequest) Reset() { + *orig = LogsRequest{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *LogsRequest) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.RequestContext != nil { + dest.WriteObjectField("requestContext") + orig.RequestContext.MarshalJSON(dest) + } + dest.WriteObjectField("logsData") + orig.LogsData.MarshalJSON(dest) + if orig.FormatVersion != uint32(0) { + dest.WriteObjectField("formatVersion") + dest.WriteUint32(orig.FormatVersion) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *LogsRequest) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "requestContext", "request_context": + orig.RequestContext = NewRequestContext() + orig.RequestContext.UnmarshalJSON(iter) + case "logsData", "logs_data": + + orig.LogsData.UnmarshalJSON(iter) + case "formatVersion", "format_version": + orig.FormatVersion = iter.ReadUint32() + default: + iter.Skip() + } + } +} + +func (orig *LogsRequest) SizeProto() int { + var n int + var l int + _ = l + if orig.RequestContext != nil { + l = orig.RequestContext.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + l = orig.LogsData.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + if orig.FormatVersion != uint32(0) { + n += 5 + } + return n +} + +func (orig *LogsRequest) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.RequestContext != nil { + l = orig.RequestContext.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = orig.LogsData.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + + if orig.FormatVersion != uint32(0) { + pos -= 4 + binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion)) + pos-- + buf[pos] = 0xd + } + return len(buf) - pos +} + +func (orig *LogsRequest) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
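Editor's note: `FormatVersion` is the one fixed32 field in these request types, which is why `SizeProto` charges a flat 5 bytes (1 tag + 4 payload) and `MarshalProto` emits tag `0xd` = `(1<<3)|5` before a little-endian word. The same bytes, computed standalone:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 5)
	pos := len(buf)
	pos -= 4
	binary.LittleEndian.PutUint32(buf[pos:], 7) // sample FormatVersion
	pos--
	buf[pos] = 0x0d // (1 << 3) | 5: field 1, wire type I32
	fmt.Printf("% x\n", buf) // 0d 07 00 00 00
}
```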
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + orig.RequestContext = NewRequestContext() + err = orig.RequestContext.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field LogsData", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.LogsData.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 1: + if wireType != proto.WireTypeI32 { + return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType) + } + var num uint32 + num, pos, err = proto.ConsumeI32(buf, pos) + if err != nil { + return err + } + + orig.FormatVersion = uint32(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestLogsRequest() *LogsRequest { + orig := NewLogsRequest() + orig.RequestContext = GenTestRequestContext() + orig.LogsData = *GenTestLogsData() + orig.FormatVersion = uint32(13) + return orig +} + +func GenTestLogsRequestPtrSlice() []*LogsRequest { + orig := make([]*LogsRequest, 5) + orig[0] = NewLogsRequest() + orig[1] = GenTestLogsRequest() + orig[2] = NewLogsRequest() + orig[3] = GenTestLogsRequest() + orig[4] = NewLogsRequest() + return orig +} + +func GenTestLogsRequestSlice() []LogsRequest { + orig := make([]LogsRequest, 5) + orig[1] = *GenTestLogsRequest() + orig[3] = *GenTestLogsRequest() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mapping.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go similarity index 64% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mapping.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go index b9220b05637..8a91a076290 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mapping.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go @@ -10,27 +10,35 @@ import ( "fmt" "sync" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID +type Mapping struct { + AttributeIndices []int32 + MemoryStart uint64 + MemoryLimit uint64 + FileOffset uint64 + FilenameStrindex int32 +} + var ( protoPoolMapping = sync.Pool{ New: func() any { - return &otlpprofiles.Mapping{} + return &Mapping{} }, } ) -func NewOrigMapping() *otlpprofiles.Mapping { +func NewMapping() *Mapping { if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.Mapping{} + return &Mapping{} } - return protoPoolMapping.Get().(*otlpprofiles.Mapping) + return protoPoolMapping.Get().(*Mapping) } -func DeleteOrigMapping(orig *otlpprofiles.Mapping, nullable bool) { +func DeleteMapping(orig *Mapping, nullable bool) { if orig == nil { return } @@ -46,30 +54,82 @@ 
func DeleteOrigMapping(orig *otlpprofiles.Mapping, nullable bool) { } } -func CopyOrigMapping(dest, src *otlpprofiles.Mapping) { +func CopyMapping(dest, src *Mapping) *Mapping { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewMapping() } dest.MemoryStart = src.MemoryStart dest.MemoryLimit = src.MemoryLimit dest.FileOffset = src.FileOffset dest.FilenameStrindex = src.FilenameStrindex - dest.AttributeIndices = CopyOrigInt32Slice(dest.AttributeIndices, src.AttributeIndices) + dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...) + + return dest } -func GenTestOrigMapping() *otlpprofiles.Mapping { - orig := NewOrigMapping() - orig.MemoryStart = uint64(13) - orig.MemoryLimit = uint64(13) - orig.FileOffset = uint64(13) - orig.FilenameStrindex = int32(13) - orig.AttributeIndices = GenerateOrigTestInt32Slice() - return orig +func CopyMappingSlice(dest, src []Mapping) []Mapping { + var newDest []Mapping + if cap(dest) < len(src) { + newDest = make([]Mapping, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteMapping(&dest[i], false) + } + } + for i := range src { + CopyMapping(&newDest[i], &src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigMapping(orig *otlpprofiles.Mapping, dest *json.Stream) { +func CopyMappingPtrSlice(dest, src []*Mapping) []*Mapping { + var newDest []*Mapping + if cap(dest) < len(src) { + newDest = make([]*Mapping, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewMapping() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteMapping(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewMapping() + } + } + for i := range src { + CopyMapping(newDest[i], src[i]) + } + return newDest +} + +func (orig *Mapping) Reset() { + *orig = Mapping{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Mapping) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if orig.MemoryStart != uint64(0) { dest.WriteObjectField("memoryStart") @@ -97,11 +157,12 @@ func MarshalJSONOrigMapping(orig *otlpprofiles.Mapping, dest *json.Stream) { } dest.WriteArrayEnd() } + dest.WriteObjectEnd() } -// UnmarshalJSONOrigMapping unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigMapping(orig *otlpprofiles.Mapping, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
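Editor's note: this rename hunk shows the broader refactor in the pdata update: the `otlpprofiles.Mapping` protogen type becomes a locally defined `Mapping` struct, and free `*Orig*` functions become methods and `Copy*` helpers that now return the destination. The new `Copy` contract (nil src yields nil, nil dest allocates, self-copy is a no-op) in a standalone sketch with a stand-in `Pair` type:

```go
package main

import "fmt"

type Pair struct{ A, B int }

// CopyPair mirrors the contract of the generated CopyMapping and friends.
func CopyPair(dest, src *Pair) *Pair {
	if src == dest {
		return dest
	}
	if src == nil {
		return nil
	}
	if dest == nil {
		dest = &Pair{}
	}
	*dest = *src
	return dest
}

func main() {
	src := &Pair{A: 1, B: 2}
	fmt.Println(CopyPair(nil, src)) // allocates: &{1 2}
	fmt.Println(CopyPair(src, nil)) // <nil>
}
```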
+func (orig *Mapping) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "memoryStart", "memory_start": @@ -123,22 +184,23 @@ func UnmarshalJSONOrigMapping(orig *otlpprofiles.Mapping, iter *json.Iterator) { } } -func SizeProtoOrigMapping(orig *otlpprofiles.Mapping) int { +func (orig *Mapping) SizeProto() int { var n int var l int _ = l - if orig.MemoryStart != 0 { + if orig.MemoryStart != uint64(0) { n += 1 + proto.Sov(uint64(orig.MemoryStart)) } - if orig.MemoryLimit != 0 { + if orig.MemoryLimit != uint64(0) { n += 1 + proto.Sov(uint64(orig.MemoryLimit)) } - if orig.FileOffset != 0 { + if orig.FileOffset != uint64(0) { n += 1 + proto.Sov(uint64(orig.FileOffset)) } - if orig.FilenameStrindex != 0 { + if orig.FilenameStrindex != int32(0) { n += 1 + proto.Sov(uint64(orig.FilenameStrindex)) } + if len(orig.AttributeIndices) > 0 { l = 0 for _, e := range orig.AttributeIndices { @@ -149,26 +211,26 @@ func SizeProtoOrigMapping(orig *otlpprofiles.Mapping) int { return n } -func MarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) int { +func (orig *Mapping) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - if orig.MemoryStart != 0 { + if orig.MemoryStart != uint64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.MemoryStart)) pos-- buf[pos] = 0x8 } - if orig.MemoryLimit != 0 { + if orig.MemoryLimit != uint64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.MemoryLimit)) pos-- buf[pos] = 0x10 } - if orig.FileOffset != 0 { + if orig.FileOffset != uint64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.FileOffset)) pos-- buf[pos] = 0x18 } - if orig.FilenameStrindex != 0 { + if orig.FilenameStrindex != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.FilenameStrindex)) pos-- buf[pos] = 0x20 @@ -186,7 +248,7 @@ func MarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) int { return len(buf) - pos } -func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error { +func (orig *Mapping) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -210,7 +272,6 @@ func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error { if err != nil { return err } - orig.MemoryStart = uint64(num) case 2: @@ -222,7 +283,6 @@ func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error { if err != nil { return err } - orig.MemoryLimit = uint64(num) case 3: @@ -234,7 +294,6 @@ func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error { if err != nil { return err } - orig.FileOffset = uint64(num) case 4: @@ -246,7 +305,6 @@ func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error { if err != nil { return err } - orig.FilenameStrindex = int32(num) case 5: switch wireType { @@ -287,3 +345,30 @@ func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error { } return nil } + +func GenTestMapping() *Mapping { + orig := NewMapping() + orig.MemoryStart = uint64(13) + orig.MemoryLimit = uint64(13) + orig.FileOffset = uint64(13) + orig.FilenameStrindex = int32(13) + orig.AttributeIndices = []int32{int32(0), int32(13)} + return orig +} + +func GenTestMappingPtrSlice() []*Mapping { + orig := make([]*Mapping, 5) + orig[0] = NewMapping() + orig[1] = GenTestMapping() + orig[2] = NewMapping() + orig[3] = GenTestMapping() + orig[4] = NewMapping() + return orig +} + +func GenTestMappingSlice() []Mapping { + orig := make([]Mapping, 5) + orig[1] = *GenTestMapping() + orig[3] = 
*GenTestMapping() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go new file mode 100644 index 00000000000..6e5fa339f8a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go @@ -0,0 +1,798 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +func (m *Metric) GetData() any { + if m != nil { + return m.Data + } + return nil +} + +type Metric_Gauge struct { + Gauge *Gauge +} + +func (m *Metric) GetGauge() *Gauge { + if v, ok := m.GetData().(*Metric_Gauge); ok { + return v.Gauge + } + return nil +} + +type Metric_Sum struct { + Sum *Sum +} + +func (m *Metric) GetSum() *Sum { + if v, ok := m.GetData().(*Metric_Sum); ok { + return v.Sum + } + return nil +} + +type Metric_Histogram struct { + Histogram *Histogram +} + +func (m *Metric) GetHistogram() *Histogram { + if v, ok := m.GetData().(*Metric_Histogram); ok { + return v.Histogram + } + return nil +} + +type Metric_ExponentialHistogram struct { + ExponentialHistogram *ExponentialHistogram +} + +func (m *Metric) GetExponentialHistogram() *ExponentialHistogram { + if v, ok := m.GetData().(*Metric_ExponentialHistogram); ok { + return v.ExponentialHistogram + } + return nil +} + +type Metric_Summary struct { + Summary *Summary +} + +func (m *Metric) GetSummary() *Summary { + if v, ok := m.GetData().(*Metric_Summary); ok { + return v.Summary + } + return nil +} + +// Metric represents one metric as a collection of datapoints. 
+// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto +type Metric struct { + Name string + Description string + Unit string + Data any + Metadata []KeyValue +} + +var ( + protoPoolMetric = sync.Pool{ + New: func() any { + return &Metric{} + }, + } + + ProtoPoolMetric_Gauge = sync.Pool{ + New: func() any { + return &Metric_Gauge{} + }, + } + + ProtoPoolMetric_Sum = sync.Pool{ + New: func() any { + return &Metric_Sum{} + }, + } + + ProtoPoolMetric_Histogram = sync.Pool{ + New: func() any { + return &Metric_Histogram{} + }, + } + + ProtoPoolMetric_ExponentialHistogram = sync.Pool{ + New: func() any { + return &Metric_ExponentialHistogram{} + }, + } + + ProtoPoolMetric_Summary = sync.Pool{ + New: func() any { + return &Metric_Summary{} + }, + } +) + +func NewMetric() *Metric { + if !UseProtoPooling.IsEnabled() { + return &Metric{} + } + return protoPoolMetric.Get().(*Metric) +} + +func DeleteMetric(orig *Metric, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + switch ov := orig.Data.(type) { + case *Metric_Gauge: + DeleteGauge(ov.Gauge, true) + ov.Gauge = nil + ProtoPoolMetric_Gauge.Put(ov) + case *Metric_Sum: + DeleteSum(ov.Sum, true) + ov.Sum = nil + ProtoPoolMetric_Sum.Put(ov) + case *Metric_Histogram: + DeleteHistogram(ov.Histogram, true) + ov.Histogram = nil + ProtoPoolMetric_Histogram.Put(ov) + case *Metric_ExponentialHistogram: + DeleteExponentialHistogram(ov.ExponentialHistogram, true) + ov.ExponentialHistogram = nil + ProtoPoolMetric_ExponentialHistogram.Put(ov) + case *Metric_Summary: + DeleteSummary(ov.Summary, true) + ov.Summary = nil + ProtoPoolMetric_Summary.Put(ov) + } + for i := range orig.Metadata { + DeleteKeyValue(&orig.Metadata[i], false) + } + orig.Reset() + if nullable { + protoPoolMetric.Put(orig) + } +} + +func CopyMetric(dest, src *Metric) *Metric { + // If copying to same object, just return. 
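Editor's note: `Metric.Data` is a hand-rolled protobuf oneof: an `any` that holds exactly one wrapper struct (`Metric_Gauge`, `Metric_Sum`, ...), with `Get*` accessors that type-assert and return nil on mismatch. The shape, reduced to a runnable standalone mirror:

```go
package main

import "fmt"

type Gauge struct{ Points int }
type Metric struct{ Data any }
type Metric_Gauge struct{ Gauge *Gauge }

// GetGauge returns the gauge variant, or nil if another variant is set.
func (m *Metric) GetGauge() *Gauge {
	if v, ok := m.Data.(*Metric_Gauge); ok {
		return v.Gauge
	}
	return nil
}

func main() {
	m := &Metric{Data: &Metric_Gauge{Gauge: &Gauge{Points: 3}}}
	fmt.Println(m.GetGauge().Points) // 3
	// Had Data held a Sum wrapper, GetGauge() would return nil.
}
```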
+ if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewMetric() + } + dest.Name = src.Name + dest.Description = src.Description + dest.Unit = src.Unit + switch t := src.Data.(type) { + case *Metric_Gauge: + var ov *Metric_Gauge + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Gauge{} + } else { + ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge) + } + ov.Gauge = NewGauge() + CopyGauge(ov.Gauge, t.Gauge) + dest.Data = ov + + case *Metric_Sum: + var ov *Metric_Sum + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Sum{} + } else { + ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum) + } + ov.Sum = NewSum() + CopySum(ov.Sum, t.Sum) + dest.Data = ov + + case *Metric_Histogram: + var ov *Metric_Histogram + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Histogram{} + } else { + ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram) + } + ov.Histogram = NewHistogram() + CopyHistogram(ov.Histogram, t.Histogram) + dest.Data = ov + + case *Metric_ExponentialHistogram: + var ov *Metric_ExponentialHistogram + if !UseProtoPooling.IsEnabled() { + ov = &Metric_ExponentialHistogram{} + } else { + ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram) + } + ov.ExponentialHistogram = NewExponentialHistogram() + CopyExponentialHistogram(ov.ExponentialHistogram, t.ExponentialHistogram) + dest.Data = ov + + case *Metric_Summary: + var ov *Metric_Summary + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Summary{} + } else { + ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary) + } + ov.Summary = NewSummary() + CopySummary(ov.Summary, t.Summary) + dest.Data = ov + + default: + dest.Data = nil + } + dest.Metadata = CopyKeyValueSlice(dest.Metadata, src.Metadata) + + return dest +} + +func CopyMetricSlice(dest, src []Metric) []Metric { + var newDest []Metric + if cap(dest) < len(src) { + newDest = make([]Metric, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteMetric(&dest[i], false) + } + } + for i := range src { + CopyMetric(&newDest[i], &src[i]) + } + return newDest +} + +func CopyMetricPtrSlice(dest, src []*Metric) []*Metric { + var newDest []*Metric + if cap(dest) < len(src) { + newDest = make([]*Metric, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewMetric() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteMetric(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewMetric() + } + } + for i := range src { + CopyMetric(newDest[i], src[i]) + } + return newDest +} + +func (orig *Metric) Reset() { + *orig = Metric{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *Metric) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.Name != "" { + dest.WriteObjectField("name") + dest.WriteString(orig.Name) + } + if orig.Description != "" { + dest.WriteObjectField("description") + dest.WriteString(orig.Description) + } + if orig.Unit != "" { + dest.WriteObjectField("unit") + dest.WriteString(orig.Unit) + } + switch orig := orig.Data.(type) { + case *Metric_Gauge: + if orig.Gauge != nil { + dest.WriteObjectField("gauge") + orig.Gauge.MarshalJSON(dest) + } + case *Metric_Sum: + if orig.Sum != nil { + dest.WriteObjectField("sum") + orig.Sum.MarshalJSON(dest) + } + case *Metric_Histogram: + if orig.Histogram != nil { + dest.WriteObjectField("histogram") + orig.Histogram.MarshalJSON(dest) + } + case *Metric_ExponentialHistogram: + if orig.ExponentialHistogram != nil { + dest.WriteObjectField("exponentialHistogram") + orig.ExponentialHistogram.MarshalJSON(dest) + } + case *Metric_Summary: + if orig.Summary != nil { + dest.WriteObjectField("summary") + orig.Summary.MarshalJSON(dest) + } + } + if len(orig.Metadata) > 0 { + dest.WriteObjectField("metadata") + dest.WriteArrayStart() + orig.Metadata[0].MarshalJSON(dest) + for i := 1; i < len(orig.Metadata); i++ { + dest.WriteMore() + orig.Metadata[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Metric) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "name": + orig.Name = iter.ReadString() + case "description": + orig.Description = iter.ReadString() + case "unit": + orig.Unit = iter.ReadString() + + case "gauge": + { + var ov *Metric_Gauge + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Gauge{} + } else { + ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge) + } + ov.Gauge = NewGauge() + ov.Gauge.UnmarshalJSON(iter) + orig.Data = ov + } + case "sum": + { + var ov *Metric_Sum + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Sum{} + } else { + ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum) + } + ov.Sum = NewSum() + ov.Sum.UnmarshalJSON(iter) + orig.Data = ov + } + case "histogram": + { + var ov *Metric_Histogram + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Histogram{} + } else { + ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram) + } + ov.Histogram = NewHistogram() + ov.Histogram.UnmarshalJSON(iter) + orig.Data = ov + } + case "exponentialHistogram", "exponential_histogram": + { + var ov *Metric_ExponentialHistogram + if !UseProtoPooling.IsEnabled() { + ov = &Metric_ExponentialHistogram{} + } else { + ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram) + } + ov.ExponentialHistogram = NewExponentialHistogram() + ov.ExponentialHistogram.UnmarshalJSON(iter) + orig.Data = ov + } + case "summary": + { + var ov *Metric_Summary + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Summary{} + } else { + ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary) + } + ov.Summary = NewSummary() + ov.Summary.UnmarshalJSON(iter) + orig.Data = ov + } + + case "metadata": + for iter.ReadArray() { + orig.Metadata = append(orig.Metadata, KeyValue{}) + orig.Metadata[len(orig.Metadata)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *Metric) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.Name) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.Description) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l 
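Editor's note: `SizeProto` charges `1 + proto.Sov(uint64(l)) + l` per length-delimited field: one tag byte, the varint-encoded length, the payload. `proto.Sov` is internal; the sketch below assumes only its evident contract (byte count of a varint):

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov returns the number of 7-bit groups needed to varint-encode x,
// matching the size arithmetic the generated SizeProto relies on.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	fmt.Println(sov(0), sov(127), sov(128), sov(300)) // 1 1 2 2
}
```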
+ } + + l = len(orig.Unit) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + switch orig := orig.Data.(type) { + case nil: + _ = orig + break + case *Metric_Gauge: + if orig.Gauge != nil { + l = orig.Gauge.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *Metric_Sum: + if orig.Sum != nil { + l = orig.Sum.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *Metric_Histogram: + if orig.Histogram != nil { + l = orig.Histogram.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *Metric_ExponentialHistogram: + if orig.ExponentialHistogram != nil { + l = orig.ExponentialHistogram.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *Metric_Summary: + if orig.Summary != nil { + l = orig.Summary.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + } + for i := range orig.Metadata { + l = orig.Metadata[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *Metric) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.Name) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Name) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + l = len(orig.Description) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Description) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.Unit) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Unit) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + switch orig := orig.Data.(type) { + case *Metric_Gauge: + if orig.Gauge != nil { + l = orig.Gauge.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x2a + } + case *Metric_Sum: + if orig.Sum != nil { + l = orig.Sum.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x3a + } + case *Metric_Histogram: + if orig.Histogram != nil { + l = orig.Histogram.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x4a + } + case *Metric_ExponentialHistogram: + if orig.ExponentialHistogram != nil { + l = orig.ExponentialHistogram.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x52 + } + case *Metric_Summary: + if orig.Summary != nil { + l = orig.Summary.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x5a + } + } + for i := len(orig.Metadata) - 1; i >= 0; i-- { + l = orig.Metadata[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x62 + } + return len(buf) - pos +} + +func (orig *Metric) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
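Editor's note: the tag constants in `Metric.MarshalProto` are just `(fieldNumber << 3) | wireType`, which is how `0x2a` maps to the Gauge oneof (field 5) and `0x62` to Metadata (field 12). Verified standalone:

```go
package main

import "fmt"

func tag(field, wire uint32) uint32 { return field<<3 | wire }

func main() {
	fmt.Printf("%#x\n", tag(5, 2))  // 0x2a: Gauge, length-delimited
	fmt.Printf("%#x\n", tag(7, 2))  // 0x3a: Sum
	fmt.Printf("%#x\n", tag(12, 2)) // 0x62: Metadata
}
```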
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Name = string(buf[startPos:pos]) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Description = string(buf[startPos:pos]) + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Unit = string(buf[startPos:pos]) + + case 5: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *Metric_Gauge + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Gauge{} + } else { + ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge) + } + ov.Gauge = NewGauge() + err = ov.Gauge.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.Data = ov + + case 7: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *Metric_Sum + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Sum{} + } else { + ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum) + } + ov.Sum = NewSum() + err = ov.Sum.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.Data = ov + + case 9: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *Metric_Histogram + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Histogram{} + } else { + ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram) + } + ov.Histogram = NewHistogram() + err = ov.Histogram.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.Data = ov + + case 10: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *Metric_ExponentialHistogram + if !UseProtoPooling.IsEnabled() { + ov = &Metric_ExponentialHistogram{} + } else { + ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram) + } + ov.ExponentialHistogram = NewExponentialHistogram() + err = ov.ExponentialHistogram.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.Data = ov + + case 11: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov 
*Metric_Summary + if !UseProtoPooling.IsEnabled() { + ov = &Metric_Summary{} + } else { + ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary) + } + ov.Summary = NewSummary() + err = ov.Summary.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.Data = ov + + case 12: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Metadata = append(orig.Metadata, KeyValue{}) + err = orig.Metadata[len(orig.Metadata)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestMetric() *Metric { + orig := NewMetric() + orig.Name = "test_name" + orig.Description = "test_description" + orig.Unit = "test_unit" + orig.Data = &Metric_Gauge{Gauge: GenTestGauge()} + orig.Metadata = []KeyValue{{}, *GenTestKeyValue()} + return orig +} + +func GenTestMetricPtrSlice() []*Metric { + orig := make([]*Metric, 5) + orig[0] = NewMetric() + orig[1] = GenTestMetric() + orig[2] = NewMetric() + orig[3] = GenTestMetric() + orig[4] = NewMetric() + return orig +} + +func GenTestMetricSlice() []Metric { + orig := make([]Metric, 5) + orig[1] = *GenTestMetric() + orig[3] = *GenTestMetric() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go new file mode 100644 index 00000000000..aa7900f672a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go @@ -0,0 +1,245 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// MetricsData represents the metrics data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP metrics data but do not +// implement the OTLP protocol.. +type MetricsData struct { + ResourceMetrics []*ResourceMetrics +} + +var ( + protoPoolMetricsData = sync.Pool{ + New: func() any { + return &MetricsData{} + }, + } +) + +func NewMetricsData() *MetricsData { + if !UseProtoPooling.IsEnabled() { + return &MetricsData{} + } + return protoPoolMetricsData.Get().(*MetricsData) +} + +func DeleteMetricsData(orig *MetricsData, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.ResourceMetrics { + DeleteResourceMetrics(orig.ResourceMetrics[i], true) + } + orig.Reset() + if nullable { + protoPoolMetricsData.Put(orig) + } +} + +func CopyMetricsData(dest, src *MetricsData) *MetricsData { + // If copying to same object, just return. 
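Editor's note: as in the other generated readers, `UnmarshalJSON` accepts both the camelCase and snake_case spellings of each key (`"resourceMetrics"`/`"resource_metrics"`) while `MarshalJSON` emits only camelCase, matching OTLP JSON conventions. The dispatch shape, in a trivial standalone form:

```go
package main

import "fmt"

// canonical mirrors the dual-spelling switch in the generated readers.
func canonical(key string) string {
	switch key {
	case "resourceMetrics", "resource_metrics":
		return "ResourceMetrics"
	default:
		return "(skipped)" // unknown keys fall through to iter.Skip()
	}
}

func main() {
	fmt.Println(canonical("resource_metrics")) // ResourceMetrics
	fmt.Println(canonical("resourceMetrics"))  // ResourceMetrics
	fmt.Println(canonical("unknownField"))     // (skipped)
}
```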
+ if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewMetricsData() + } + dest.ResourceMetrics = CopyResourceMetricsPtrSlice(dest.ResourceMetrics, src.ResourceMetrics) + + return dest +} + +func CopyMetricsDataSlice(dest, src []MetricsData) []MetricsData { + var newDest []MetricsData + if cap(dest) < len(src) { + newDest = make([]MetricsData, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteMetricsData(&dest[i], false) + } + } + for i := range src { + CopyMetricsData(&newDest[i], &src[i]) + } + return newDest +} + +func CopyMetricsDataPtrSlice(dest, src []*MetricsData) []*MetricsData { + var newDest []*MetricsData + if cap(dest) < len(src) { + newDest = make([]*MetricsData, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewMetricsData() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteMetricsData(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewMetricsData() + } + } + for i := range src { + CopyMetricsData(newDest[i], src[i]) + } + return newDest +} + +func (orig *MetricsData) Reset() { + *orig = MetricsData{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *MetricsData) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.ResourceMetrics) > 0 { + dest.WriteObjectField("resourceMetrics") + dest.WriteArrayStart() + orig.ResourceMetrics[0].MarshalJSON(dest) + for i := 1; i < len(orig.ResourceMetrics); i++ { + dest.WriteMore() + orig.ResourceMetrics[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *MetricsData) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resourceMetrics", "resource_metrics": + for iter.ReadArray() { + orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics()) + orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *MetricsData) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.ResourceMetrics { + l = orig.ResourceMetrics[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *MetricsData) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- { + l = orig.ResourceMetrics[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *MetricsData) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics()) + err = orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestMetricsData() *MetricsData { + orig := NewMetricsData() + orig.ResourceMetrics = []*ResourceMetrics{{}, GenTestResourceMetrics()} + return orig +} + +func GenTestMetricsDataPtrSlice() []*MetricsData { + orig := make([]*MetricsData, 5) + orig[0] = NewMetricsData() + orig[1] = GenTestMetricsData() + orig[2] = NewMetricsData() + orig[3] = GenTestMetricsData() + orig[4] = NewMetricsData() + return orig +} + +func GenTestMetricsDataSlice() []MetricsData { + orig := make([]MetricsData, 5) + orig[1] = *GenTestMetricsData() + orig[3] = *GenTestMetricsData() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go new file mode 100644 index 00000000000..c2cca74a1bc --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go @@ -0,0 +1,299 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "encoding/binary" + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +type MetricsRequest struct { + RequestContext *RequestContext + MetricsData MetricsData + FormatVersion uint32 +} + +var ( + protoPoolMetricsRequest = sync.Pool{ + New: func() any { + return &MetricsRequest{} + }, + } +) + +func NewMetricsRequest() *MetricsRequest { + if !UseProtoPooling.IsEnabled() { + return &MetricsRequest{} + } + return protoPoolMetricsRequest.Get().(*MetricsRequest) +} + +func DeleteMetricsRequest(orig *MetricsRequest, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteRequestContext(orig.RequestContext, true) + DeleteMetricsData(&orig.MetricsData, false) + + orig.Reset() + if nullable { + protoPoolMetricsRequest.Put(orig) + } +} + +func CopyMetricsRequest(dest, src *MetricsRequest) *MetricsRequest { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewMetricsRequest() + } + dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext) + + CopyMetricsData(&dest.MetricsData, &src.MetricsData) + + dest.FormatVersion = src.FormatVersion + + return dest +} + +func CopyMetricsRequestSlice(dest, src []MetricsRequest) []MetricsRequest { + var newDest []MetricsRequest + if cap(dest) < len(src) { + newDest = make([]MetricsRequest, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. 
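Editor's note: the `nullable` flag threaded through the `Delete*` helpers marks ownership: pointer-typed fields (e.g. `RequestContext`) are deleted with `true` so the freed object can return to its `sync.Pool`, while embedded values (e.g. the `MetricsData` field) use `false` because their storage belongs to the parent. A standalone sketch of that convention:

```go
package main

import (
	"fmt"
	"sync"
)

type Thing struct{ N int }

var pool = sync.Pool{New: func() any { return &Thing{} }}

// deleteThing mirrors the generated Delete helpers: always reset,
// but only pool objects that own their allocation.
func deleteThing(orig *Thing, nullable bool) {
	if orig == nil {
		return
	}
	*orig = Thing{} // Reset
	if nullable {
		pool.Put(orig)
	}
}

func main() {
	ptr := pool.Get().(*Thing)
	deleteThing(ptr, true) // heap object: returned to the pool

	var embedded Thing            // value embedded in a parent struct
	deleteThing(&embedded, false) // reset only; pooling it would be unsafe
	fmt.Println(embedded.N)       // 0
}
```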
+ // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteMetricsRequest(&dest[i], false) + } + } + for i := range src { + CopyMetricsRequest(&newDest[i], &src[i]) + } + return newDest +} + +func CopyMetricsRequestPtrSlice(dest, src []*MetricsRequest) []*MetricsRequest { + var newDest []*MetricsRequest + if cap(dest) < len(src) { + newDest = make([]*MetricsRequest, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewMetricsRequest() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteMetricsRequest(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewMetricsRequest() + } + } + for i := range src { + CopyMetricsRequest(newDest[i], src[i]) + } + return newDest +} + +func (orig *MetricsRequest) Reset() { + *orig = MetricsRequest{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *MetricsRequest) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.RequestContext != nil { + dest.WriteObjectField("requestContext") + orig.RequestContext.MarshalJSON(dest) + } + dest.WriteObjectField("metricsData") + orig.MetricsData.MarshalJSON(dest) + if orig.FormatVersion != uint32(0) { + dest.WriteObjectField("formatVersion") + dest.WriteUint32(orig.FormatVersion) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *MetricsRequest) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "requestContext", "request_context": + orig.RequestContext = NewRequestContext() + orig.RequestContext.UnmarshalJSON(iter) + case "metricsData", "metrics_data": + + orig.MetricsData.UnmarshalJSON(iter) + case "formatVersion", "format_version": + orig.FormatVersion = iter.ReadUint32() + default: + iter.Skip() + } + } +} + +func (orig *MetricsRequest) SizeProto() int { + var n int + var l int + _ = l + if orig.RequestContext != nil { + l = orig.RequestContext.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + l = orig.MetricsData.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + if orig.FormatVersion != uint32(0) { + n += 5 + } + return n +} + +func (orig *MetricsRequest) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.RequestContext != nil { + l = orig.RequestContext.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = orig.MetricsData.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + + if orig.FormatVersion != uint32(0) { + pos -= 4 + binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion)) + pos-- + buf[pos] = 0xd + } + return len(buf) - pos +} + +func (orig *MetricsRequest) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + orig.RequestContext = NewRequestContext() + err = orig.RequestContext.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsData", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.MetricsData.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 1: + if wireType != proto.WireTypeI32 { + return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType) + } + var num uint32 + num, pos, err = proto.ConsumeI32(buf, pos) + if err != nil { + return err + } + + orig.FormatVersion = uint32(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestMetricsRequest() *MetricsRequest { + orig := NewMetricsRequest() + orig.RequestContext = GenTestRequestContext() + orig.MetricsData = *GenTestMetricsData() + orig.FormatVersion = uint32(13) + return orig +} + +func GenTestMetricsRequestPtrSlice() []*MetricsRequest { + orig := make([]*MetricsRequest, 5) + orig[0] = NewMetricsRequest() + orig[1] = GenTestMetricsRequest() + orig[2] = NewMetricsRequest() + orig[3] = GenTestMetricsRequest() + orig[4] = NewMetricsRequest() + return orig +} + +func GenTestMetricsRequestSlice() []MetricsRequest { + orig := make([]MetricsRequest, 5) + orig[1] = *GenTestMetricsRequest() + orig[3] = *GenTestMetricsRequest() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go similarity index 51% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapoint.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go index 348b0b73f4e..e413d64078e 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go @@ -12,40 +12,77 @@ import ( "math" "sync" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +func (m *NumberDataPoint) GetValue() any { + if m != nil { + return m.Value + } + return nil +} + +type NumberDataPoint_AsDouble struct { + AsDouble float64 +} + +func (m *NumberDataPoint) GetAsDouble() float64 { + if v, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok { + return v.AsDouble + } + return float64(0) +} + +type NumberDataPoint_AsInt struct { + AsInt int64 +} + +func (m *NumberDataPoint) GetAsInt() int64 { + if v, ok := m.GetValue().(*NumberDataPoint_AsInt); ok { + return v.AsInt + } + return int64(0) +} + +// NumberDataPoint is a single data point in a timeseries that describes the time-varying value of a 
number metric. +type NumberDataPoint struct { + Value any + Attributes []KeyValue + Exemplars []Exemplar + StartTimeUnixNano uint64 + TimeUnixNano uint64 + Flags uint32 +} + var ( protoPoolNumberDataPoint = sync.Pool{ New: func() any { - return &otlpmetrics.NumberDataPoint{} + return &NumberDataPoint{} }, } ProtoPoolNumberDataPoint_AsDouble = sync.Pool{ New: func() any { - return &otlpmetrics.NumberDataPoint_AsDouble{} + return &NumberDataPoint_AsDouble{} }, } ProtoPoolNumberDataPoint_AsInt = sync.Pool{ New: func() any { - return &otlpmetrics.NumberDataPoint_AsInt{} + return &NumberDataPoint_AsInt{} }, } ) -func NewOrigNumberDataPoint() *otlpmetrics.NumberDataPoint { +func NewNumberDataPoint() *NumberDataPoint { if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.NumberDataPoint{} + return &NumberDataPoint{} } - return protoPoolNumberDataPoint.Get().(*otlpmetrics.NumberDataPoint) + return protoPoolNumberDataPoint.Get().(*NumberDataPoint) } -func DeleteOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, nullable bool) { +func DeleteNumberDataPoint(orig *NumberDataPoint, nullable bool) { if orig == nil { return } @@ -54,25 +91,24 @@ func DeleteOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, nullable bool) orig.Reset() return } - for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) + DeleteKeyValue(&orig.Attributes[i], false) } + switch ov := orig.Value.(type) { - case *otlpmetrics.NumberDataPoint_AsDouble: + case *NumberDataPoint_AsDouble: if UseProtoPooling.IsEnabled() { ov.AsDouble = float64(0) ProtoPoolNumberDataPoint_AsDouble.Put(ov) } - case *otlpmetrics.NumberDataPoint_AsInt: + case *NumberDataPoint_AsInt: if UseProtoPooling.IsEnabled() { ov.AsInt = int64(0) ProtoPoolNumberDataPoint_AsInt.Put(ov) } - } for i := range orig.Exemplars { - DeleteOrigExemplar(&orig.Exemplars[i], false) + DeleteExemplar(&orig.Exemplars[i], false) } orig.Reset() @@ -81,59 +117,116 @@ func DeleteOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, nullable bool) } } -func CopyOrigNumberDataPoint(dest, src *otlpmetrics.NumberDataPoint) { +func CopyNumberDataPoint(dest, src *NumberDataPoint) *NumberDataPoint { // If copying to same object, just return. 
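Editor's note: unlike the message-valued oneof on `Metric`, the scalar oneof on `NumberDataPoint` has accessors that return zero values, not nil, when a different variant is set. Reduced to a runnable mirror:

```go
package main

import "fmt"

type NumberDataPoint struct{ Value any }
type NumberDataPoint_AsDouble struct{ AsDouble float64 }
type NumberDataPoint_AsInt struct{ AsInt int64 }

// GetAsDouble returns the double variant, or 0 if another variant is set.
func (m *NumberDataPoint) GetAsDouble() float64 {
	if v, ok := m.Value.(*NumberDataPoint_AsDouble); ok {
		return v.AsDouble
	}
	return 0
}

func main() {
	p := &NumberDataPoint{Value: &NumberDataPoint_AsInt{AsInt: 42}}
	fmt.Println(p.GetAsDouble()) // 0: the int variant is set
}
```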
if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewNumberDataPoint() } - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + dest.StartTimeUnixNano = src.StartTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano switch t := src.Value.(type) { - case *otlpmetrics.NumberDataPoint_AsDouble: - var ov *otlpmetrics.NumberDataPoint_AsDouble + case *NumberDataPoint_AsDouble: + var ov *NumberDataPoint_AsDouble if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.NumberDataPoint_AsDouble{} + ov = &NumberDataPoint_AsDouble{} } else { - ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble) + ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble) } ov.AsDouble = t.AsDouble dest.Value = ov - case *otlpmetrics.NumberDataPoint_AsInt: - var ov *otlpmetrics.NumberDataPoint_AsInt + + case *NumberDataPoint_AsInt: + var ov *NumberDataPoint_AsInt if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.NumberDataPoint_AsInt{} + ov = &NumberDataPoint_AsInt{} } else { - ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt) + ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt) } ov.AsInt = t.AsInt dest.Value = ov + + default: + dest.Value = nil } - dest.Exemplars = CopyOrigExemplarSlice(dest.Exemplars, src.Exemplars) + dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars) + dest.Flags = src.Flags + + return dest } -func GenTestOrigNumberDataPoint() *otlpmetrics.NumberDataPoint { - orig := NewOrigNumberDataPoint() - orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.StartTimeUnixNano = 1234567890 - orig.TimeUnixNano = 1234567890 - orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{AsDouble: float64(3.1415926)} - orig.Exemplars = GenerateOrigTestExemplarSlice() - orig.Flags = 1 - return orig +func CopyNumberDataPointSlice(dest, src []NumberDataPoint) []NumberDataPoint { + var newDest []NumberDataPoint + if cap(dest) < len(src) { + newDest = make([]NumberDataPoint, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteNumberDataPoint(&dest[i], false) + } + } + for i := range src { + CopyNumberDataPoint(&newDest[i], &src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, dest *json.Stream) { +func CopyNumberDataPointPtrSlice(dest, src []*NumberDataPoint) []*NumberDataPoint { + var newDest []*NumberDataPoint + if cap(dest) < len(src) { + newDest = make([]*NumberDataPoint, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewNumberDataPoint() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteNumberDataPoint(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). 
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewNumberDataPoint() + } + } + for i := range src { + CopyNumberDataPoint(newDest[i], src[i]) + } + return newDest +} + +func (orig *NumberDataPoint) Reset() { + *orig = NumberDataPoint{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *NumberDataPoint) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if len(orig.Attributes) > 0 { dest.WriteObjectField("attributes") dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Attributes[0], dest) + orig.Attributes[0].MarshalJSON(dest) for i := 1; i < len(orig.Attributes); i++ { dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Attributes[i], dest) + orig.Attributes[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -146,20 +239,20 @@ func MarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, dest *jso dest.WriteUint64(orig.TimeUnixNano) } switch orig := orig.Value.(type) { - case *otlpmetrics.NumberDataPoint_AsDouble: + case *NumberDataPoint_AsDouble: dest.WriteObjectField("asDouble") dest.WriteFloat64(orig.AsDouble) - case *otlpmetrics.NumberDataPoint_AsInt: + case *NumberDataPoint_AsInt: dest.WriteObjectField("asInt") dest.WriteInt64(orig.AsInt) } if len(orig.Exemplars) > 0 { dest.WriteObjectField("exemplars") dest.WriteArrayStart() - MarshalJSONOrigExemplar(&orig.Exemplars[0], dest) + orig.Exemplars[0].MarshalJSON(dest) for i := 1; i < len(orig.Exemplars); i++ { dest.WriteMore() - MarshalJSONOrigExemplar(&orig.Exemplars[i], dest) + orig.Exemplars[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -170,14 +263,14 @@ func MarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, dest *jso dest.WriteObjectEnd() } -// UnmarshalJSONOrigNumberDataPoint unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *NumberDataPoint) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "attributes": for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) } case "startTimeUnixNano", "start_time_unix_nano": @@ -187,23 +280,22 @@ func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *j case "asDouble", "as_double": { - var ov *otlpmetrics.NumberDataPoint_AsDouble + var ov *NumberDataPoint_AsDouble if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.NumberDataPoint_AsDouble{} + ov = &NumberDataPoint_AsDouble{} } else { - ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble) + ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble) } ov.AsDouble = iter.ReadFloat64() orig.Value = ov } - case "asInt", "as_int": { - var ov *otlpmetrics.NumberDataPoint_AsInt + var ov *NumberDataPoint_AsInt if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.NumberDataPoint_AsInt{} + ov = &NumberDataPoint_AsInt{} } else { - ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt) + ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt) } ov.AsInt = iter.ReadInt64() orig.Value = ov @@ -211,8 +303,8 @@ func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *j case "exemplars": for iter.ReadArray() { - orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{}) - UnmarshalJSONOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter) + orig.Exemplars = append(orig.Exemplars, Exemplar{}) + orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter) } case "flags": @@ -223,70 +315,72 @@ func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *j } } -func SizeProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint) int { +func (orig *NumberDataPoint) SizeProto() int { var n int var l int _ = l for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) + l = orig.Attributes[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { n += 9 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { n += 9 } switch orig := orig.Value.(type) { case nil: _ = orig break - case *otlpmetrics.NumberDataPoint_AsDouble: + case *NumberDataPoint_AsDouble: + n += 9 - case *otlpmetrics.NumberDataPoint_AsInt: + case *NumberDataPoint_AsInt: + n += 9 } for i := range orig.Exemplars { - l = SizeProtoOrigExemplar(&orig.Exemplars[i]) + l = orig.Exemplars[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { n += 1 + proto.Sov(uint64(orig.Flags)) } return n } -func MarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []byte) int { +func (orig *NumberDataPoint) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) + l = orig.Attributes[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x3a } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano)) pos-- buf[pos] = 
0x11 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) pos-- buf[pos] = 0x19 } switch orig := orig.Value.(type) { - case *otlpmetrics.NumberDataPoint_AsDouble: + case *NumberDataPoint_AsDouble: pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble)) pos-- buf[pos] = 0x21 - case *otlpmetrics.NumberDataPoint_AsInt: + case *NumberDataPoint_AsInt: pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt)) pos-- @@ -294,13 +388,13 @@ func MarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []by } for i := len(orig.Exemplars) - 1; i >= 0; i-- { - l = MarshalProtoOrigExemplar(&orig.Exemplars[i], buf[:pos]) + l = orig.Exemplars[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x2a } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags)) pos-- buf[pos] = 0x40 @@ -308,7 +402,7 @@ func MarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []by return len(buf) - pos } -func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []byte) error { +func (orig *NumberDataPoint) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -333,8 +427,8 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf [] return err } startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -372,11 +466,11 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf [] if err != nil { return err } - var ov *otlpmetrics.NumberDataPoint_AsDouble + var ov *NumberDataPoint_AsDouble if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.NumberDataPoint_AsDouble{} + ov = &NumberDataPoint_AsDouble{} } else { - ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble) + ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble) } ov.AsDouble = math.Float64frombits(num) orig.Value = ov @@ -390,11 +484,11 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf [] if err != nil { return err } - var ov *otlpmetrics.NumberDataPoint_AsInt + var ov *NumberDataPoint_AsInt if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.NumberDataPoint_AsInt{} + ov = &NumberDataPoint_AsInt{} } else { - ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt) + ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt) } ov.AsInt = int64(num) orig.Value = ov @@ -409,8 +503,8 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf [] return err } startPos := pos - length - orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{}) - err = UnmarshalProtoOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos]) + orig.Exemplars = append(orig.Exemplars, Exemplar{}) + err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -424,7 +518,6 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf [] if err != nil { return err } - orig.Flags = uint32(num) default: pos, err = 
proto.ConsumeUnknown(buf, pos, wireType) @@ -435,3 +528,31 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf [] } return nil } + +func GenTestNumberDataPoint() *NumberDataPoint { + orig := NewNumberDataPoint() + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.StartTimeUnixNano = uint64(13) + orig.TimeUnixNano = uint64(13) + orig.Value = &NumberDataPoint_AsDouble{AsDouble: float64(3.1415926)} + orig.Exemplars = []Exemplar{{}, *GenTestExemplar()} + orig.Flags = uint32(13) + return orig +} + +func GenTestNumberDataPointPtrSlice() []*NumberDataPoint { + orig := make([]*NumberDataPoint, 5) + orig[0] = NewNumberDataPoint() + orig[1] = GenTestNumberDataPoint() + orig[2] = NewNumberDataPoint() + orig[3] = GenTestNumberDataPoint() + orig[4] = NewNumberDataPoint() + return orig +} + +func GenTestNumberDataPointSlice() []NumberDataPoint { + orig := make([]NumberDataPoint, 5) + orig[1] = *GenTestNumberDataPoint() + orig[3] = *GenTestNumberDataPoint() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go similarity index 62% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profile.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go index c3b3994a7eb..02167667df3 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profile.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go @@ -11,28 +11,42 @@ import ( "fmt" "sync" - "go.opentelemetry.io/collector/pdata/internal/data" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// Profile is an implementation of the pprofextended data model.
+ +type Profile struct { + OriginalPayloadFormat string + Samples []*Sample + OriginalPayload []byte + AttributeIndices []int32 + TimeUnixNano uint64 + DurationNano uint64 + Period int64 + SampleType ValueType + PeriodType ValueType + DroppedAttributesCount uint32 + ProfileId ProfileID +} + var ( protoPoolProfile = sync.Pool{ New: func() any { - return &otlpprofiles.Profile{} + return &Profile{} }, } ) -func NewOrigProfile() *otlpprofiles.Profile { +func NewProfile() *Profile { if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.Profile{} + return &Profile{} } - return protoPoolProfile.Get().(*otlpprofiles.Profile) + return protoPoolProfile.Get().(*Profile) } -func DeleteOrigProfile(orig *otlpprofiles.Profile, nullable bool) { +func DeleteProfile(orig *Profile, nullable bool) { if orig == nil { return } @@ -41,13 +55,14 @@ func DeleteOrigProfile(orig *otlpprofiles.Profile, nullable bool) { orig.Reset() return } - - DeleteOrigValueType(&orig.SampleType, false) - for i := range orig.Sample { - DeleteOrigSample(orig.Sample[i], true) + DeleteValueType(&orig.SampleType, false) + for i := range orig.Samples { + DeleteSample(orig.Samples[i], true) } - DeleteOrigValueType(&orig.PeriodType, false) - DeleteOrigProfileID(&orig.ProfileId, false) + + DeleteValueType(&orig.PeriodType, false) + + DeleteProfileID(&orig.ProfileId, false) orig.Reset() if nullable { @@ -55,54 +70,102 @@ func DeleteOrigProfile(orig *otlpprofiles.Profile, nullable bool) { } } -func CopyOrigProfile(dest, src *otlpprofiles.Profile) { +func CopyProfile(dest, src *Profile) *Profile { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil } - CopyOrigValueType(&dest.SampleType, &src.SampleType) - dest.Sample = CopyOrigSampleSlice(dest.Sample, src.Sample) + + if dest == nil { + dest = NewProfile() + } + CopyValueType(&dest.SampleType, &src.SampleType) + + dest.Samples = CopySamplePtrSlice(dest.Samples, src.Samples) + dest.TimeUnixNano = src.TimeUnixNano dest.DurationNano = src.DurationNano - CopyOrigValueType(&dest.PeriodType, &src.PeriodType) + CopyValueType(&dest.PeriodType, &src.PeriodType) + dest.Period = src.Period - dest.CommentStrindices = CopyOrigInt32Slice(dest.CommentStrindices, src.CommentStrindices) - dest.ProfileId = src.ProfileId + CopyProfileID(&dest.ProfileId, &src.ProfileId) + dest.DroppedAttributesCount = src.DroppedAttributesCount dest.OriginalPayloadFormat = src.OriginalPayloadFormat - dest.OriginalPayload = CopyOrigByteSlice(dest.OriginalPayload, src.OriginalPayload) - dest.AttributeIndices = CopyOrigInt32Slice(dest.AttributeIndices, src.AttributeIndices) + dest.OriginalPayload = src.OriginalPayload + dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...) 
+ + return dest } -func GenTestOrigProfile() *otlpprofiles.Profile { - orig := NewOrigProfile() - orig.SampleType = *GenTestOrigValueType() - orig.Sample = GenerateOrigTestSampleSlice() - orig.TimeUnixNano = 1234567890 - orig.DurationNano = 1234567890 - orig.PeriodType = *GenTestOrigValueType() - orig.Period = int64(13) - orig.CommentStrindices = GenerateOrigTestInt32Slice() - orig.ProfileId = data.ProfileID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) - orig.DroppedAttributesCount = uint32(13) - orig.OriginalPayloadFormat = "test_originalpayloadformat" - orig.OriginalPayload = GenerateOrigTestByteSlice() - orig.AttributeIndices = GenerateOrigTestInt32Slice() - return orig +func CopyProfileSlice(dest, src []Profile) []Profile { + var newDest []Profile + if cap(dest) < len(src) { + newDest = make([]Profile, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteProfile(&dest[i], false) + } + } + for i := range src { + CopyProfile(&newDest[i], &src[i]) + } + return newDest +} + +func CopyProfilePtrSlice(dest, src []*Profile) []*Profile { + var newDest []*Profile + if cap(dest) < len(src) { + newDest = make([]*Profile, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(src). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewProfile() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteProfile(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewProfile() + } + } + for i := range src { + CopyProfile(newDest[i], src[i]) + } + return newDest +} + +func (orig *Profile) Reset() { + *orig = Profile{} } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigProfile(orig *otlpprofiles.Profile, dest *json.Stream) { +// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Profile) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() dest.WriteObjectField("sampleType") - MarshalJSONOrigValueType(&orig.SampleType, dest) - if len(orig.Sample) > 0 { - dest.WriteObjectField("sample") + orig.SampleType.MarshalJSON(dest) + if len(orig.Samples) > 0 { + dest.WriteObjectField("samples") dest.WriteArrayStart() - MarshalJSONOrigSample(orig.Sample[0], dest) - for i := 1; i < len(orig.Sample); i++ { + orig.Samples[0].MarshalJSON(dest) + for i := 1; i < len(orig.Samples); i++ { dest.WriteMore() - MarshalJSONOrigSample(orig.Sample[i], dest) + orig.Samples[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -115,24 +178,14 @@ func MarshalJSONOrigProfile(orig *otlpprofiles.Profile, dest *json.Stream) { dest.WriteUint64(orig.DurationNano) } dest.WriteObjectField("periodType") - MarshalJSONOrigValueType(&orig.PeriodType, dest) + orig.PeriodType.MarshalJSON(dest) if orig.Period != int64(0) { dest.WriteObjectField("period") dest.WriteInt64(orig.Period) } - if len(orig.CommentStrindices) > 0 { - dest.WriteObjectField("commentStrindices") - dest.WriteArrayStart() - dest.WriteInt32(orig.CommentStrindices[0]) - for i := 1; i < len(orig.CommentStrindices); i++ { - dest.WriteMore() - dest.WriteInt32(orig.CommentStrindices[i]) - } - dest.WriteArrayEnd() - } - if orig.ProfileId != data.ProfileID([16]byte{}) { + if !orig.ProfileId.IsEmpty() { dest.WriteObjectField("profileId") - MarshalJSONOrigProfileID(&orig.ProfileId, dest) + orig.ProfileId.MarshalJSON(dest) } if orig.DroppedAttributesCount != uint32(0) { dest.WriteObjectField("droppedAttributesCount") @@ -157,19 +210,21 @@ func MarshalJSONOrigProfile(orig *otlpprofiles.Profile, dest *json.Stream) { } dest.WriteArrayEnd() } + dest.WriteObjectEnd() } -// UnmarshalJSONOrigProfile unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigProfile(orig *otlpprofiles.Profile, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *Profile) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "sampleType", "sample_type": - UnmarshalJSONOrigValueType(&orig.SampleType, iter) - case "sample": + + orig.SampleType.UnmarshalJSON(iter) + case "samples": for iter.ReadArray() { - orig.Sample = append(orig.Sample, NewOrigSample()) - UnmarshalJSONOrigSample(orig.Sample[len(orig.Sample)-1], iter) + orig.Samples = append(orig.Samples, NewSample()) + orig.Samples[len(orig.Samples)-1].UnmarshalJSON(iter) } case "timeUnixNano", "time_unix_nano": @@ -177,16 +232,13 @@ func UnmarshalJSONOrigProfile(orig *otlpprofiles.Profile, iter *json.Iterator) { case "durationNano", "duration_nano": orig.DurationNano = iter.ReadUint64() case "periodType", "period_type": - UnmarshalJSONOrigValueType(&orig.PeriodType, iter) + + orig.PeriodType.UnmarshalJSON(iter) case "period": orig.Period = iter.ReadInt64() - case "commentStrindices", "comment_strindices": - for iter.ReadArray() { - orig.CommentStrindices = append(orig.CommentStrindices, iter.ReadInt32()) - } - case "profileId", "profile_id": - UnmarshalJSONOrigProfileID(&orig.ProfileId, iter) + + orig.ProfileId.UnmarshalJSON(iter) case "droppedAttributesCount", "dropped_attributes_count": orig.DroppedAttributesCount = iter.ReadUint32() case "originalPayloadFormat", "original_payload_format": @@ -204,47 +256,43 @@ func UnmarshalJSONOrigProfile(orig *otlpprofiles.Profile, iter *json.Iterator) { } } -func SizeProtoOrigProfile(orig *otlpprofiles.Profile) int { +func (orig *Profile) SizeProto() int { var n int var l int _ = l - l = SizeProtoOrigValueType(&orig.SampleType) + l = orig.SampleType.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.Sample { - l = SizeProtoOrigSample(orig.Sample[i]) + for i := range orig.Samples { + l = orig.Samples[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { n += 9 } - if orig.DurationNano != 0 { + if orig.DurationNano != uint64(0) { n += 1 + proto.Sov(uint64(orig.DurationNano)) } - l = SizeProtoOrigValueType(&orig.PeriodType) + l = orig.PeriodType.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - if orig.Period != 0 { + if orig.Period != int64(0) { n += 1 + proto.Sov(uint64(orig.Period)) } - if len(orig.CommentStrindices) > 0 { - l = 0 - for _, e := range orig.CommentStrindices { - l += proto.Sov(uint64(e)) - } - n += 1 + proto.Sov(uint64(l)) + l - } - l = SizeProtoOrigProfileID(&orig.ProfileId) + l = orig.ProfileId.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount)) } + l = len(orig.OriginalPayloadFormat) if l > 0 { n += 1 + proto.Sov(uint64(l)) + l } + l = len(orig.OriginalPayload) if l > 0 { n += 1 + proto.Sov(uint64(l)) + l } + if len(orig.AttributeIndices) > 0 { l = 0 for _, e := range orig.AttributeIndices { @@ -255,68 +303,55 @@ func SizeProtoOrigProfile(orig *otlpprofiles.Profile) int { return n } -func MarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) int { +func (orig *Profile) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - - l = MarshalProtoOrigValueType(&orig.SampleType, buf[:pos]) + l = orig.SampleType.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0xa - for i := len(orig.Sample) - 1; i >= 0; i-- { - l = MarshalProtoOrigSample(orig.Sample[i], buf[:pos]) + for i := len(orig.Samples) - 
1; i >= 0; i-- { + l = orig.Samples[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x12 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) pos-- buf[pos] = 0x19 } - if orig.DurationNano != 0 { + if orig.DurationNano != uint64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.DurationNano)) pos-- buf[pos] = 0x20 } - - l = MarshalProtoOrigValueType(&orig.PeriodType, buf[:pos]) + l = orig.PeriodType.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x2a - if orig.Period != 0 { + if orig.Period != int64(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Period)) pos-- buf[pos] = 0x30 } - l = len(orig.CommentStrindices) - if l > 0 { - endPos := pos - for i := l - 1; i >= 0; i-- { - pos = proto.EncodeVarint(buf, pos, uint64(orig.CommentStrindices[i])) - } - pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos)) - pos-- - buf[pos] = 0x3a - } - - l = MarshalProtoOrigProfileID(&orig.ProfileId, buf[:pos]) + l = orig.ProfileId.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- - buf[pos] = 0x42 + buf[pos] = 0x3a - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount)) pos-- - buf[pos] = 0x48 + buf[pos] = 0x40 } l = len(orig.OriginalPayloadFormat) if l > 0 { @@ -324,7 +359,7 @@ func MarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) int { copy(buf[pos:], orig.OriginalPayloadFormat) pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- - buf[pos] = 0x52 + buf[pos] = 0x4a } l = len(orig.OriginalPayload) if l > 0 { @@ -332,7 +367,7 @@ func MarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) int { copy(buf[pos:], orig.OriginalPayload) pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- - buf[pos] = 0x5a + buf[pos] = 0x52 } l = len(orig.AttributeIndices) if l > 0 { @@ -342,12 +377,12 @@ func MarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) int { } pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos)) pos-- - buf[pos] = 0x62 + buf[pos] = 0x5a } return len(buf) - pos } -func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { +func (orig *Profile) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -373,14 +408,14 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigValueType(&orig.SampleType, buf[startPos:pos]) + err = orig.SampleType.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } case 2: if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) } var length int length, pos, err = proto.ConsumeLen(buf, pos) @@ -388,8 +423,8 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { return err } startPos := pos - length - orig.Sample = append(orig.Sample, NewOrigSample()) - err = UnmarshalProtoOrigSample(orig.Sample[len(orig.Sample)-1], buf[startPos:pos]) + orig.Samples = append(orig.Samples, NewSample()) + err = orig.Samples[len(orig.Samples)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -415,7 +450,6 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { if err != nil { 
return err } - orig.DurationNano = uint64(num) case 5: @@ -429,7 +463,7 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigValueType(&orig.PeriodType, buf[startPos:pos]) + err = orig.PeriodType.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -443,40 +477,9 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { if err != nil { return err } - orig.Period = int64(num) - case 7: - switch wireType { - case proto.WireTypeLen: - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var num uint64 - for startPos < pos { - num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos) - if err != nil { - return err - } - orig.CommentStrindices = append(orig.CommentStrindices, int32(num)) - } - if startPos != pos { - return fmt.Errorf("proto: invalid field len = %d for field CommentStrindices", pos-startPos) - } - case proto.WireTypeVarint: - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - orig.CommentStrindices = append(orig.CommentStrindices, int32(num)) - default: - return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType) - } - case 8: + case 7: if wireType != proto.WireTypeLen { return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType) } @@ -487,12 +490,12 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigProfileID(&orig.ProfileId, buf[startPos:pos]) + err = orig.ProfileId.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } - case 9: + case 8: if wireType != proto.WireTypeVarint { return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) } @@ -501,10 +504,9 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { if err != nil { return err } - orig.DroppedAttributesCount = uint32(num) - case 10: + case 9: if wireType != proto.WireTypeLen { return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType) } @@ -516,7 +518,7 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { startPos := pos - length orig.OriginalPayloadFormat = string(buf[startPos:pos]) - case 11: + case 10: if wireType != proto.WireTypeLen { return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType) } @@ -530,7 +532,7 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { orig.OriginalPayload = make([]byte, length) copy(orig.OriginalPayload, buf[startPos:pos]) } - case 12: + case 11: switch wireType { case proto.WireTypeLen: var length int @@ -569,3 +571,36 @@ func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error { } return nil } + +func GenTestProfile() *Profile { + orig := NewProfile() + orig.SampleType = *GenTestValueType() + orig.Samples = []*Sample{{}, GenTestSample()} + orig.TimeUnixNano = uint64(13) + orig.DurationNano = uint64(13) + orig.PeriodType = *GenTestValueType() + orig.Period = int64(13) + orig.ProfileId = *GenTestProfileID() + orig.DroppedAttributesCount = uint32(13) + orig.OriginalPayloadFormat = "test_originalpayloadformat" + orig.OriginalPayload = []byte{1, 2, 3} + orig.AttributeIndices = []int32{int32(0), int32(13)} + return orig +} + +func GenTestProfilePtrSlice() []*Profile { + orig := make([]*Profile, 5) + orig[0] = NewProfile() + 
orig[1] = GenTestProfile() + orig[2] = NewProfile() + orig[3] = GenTestProfile() + orig[4] = NewProfile() + return orig +} + +func GenTestProfileSlice() []Profile { + orig := make([]Profile, 5) + orig[1] = *GenTestProfile() + orig[3] = *GenTestProfile() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go new file mode 100644 index 00000000000..45a876f6deb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go @@ -0,0 +1,279 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +type ProfilesData struct { + ResourceProfiles []*ResourceProfiles + Dictionary ProfilesDictionary +} + +var ( + protoPoolProfilesData = sync.Pool{ + New: func() any { + return &ProfilesData{} + }, + } +) + +func NewProfilesData() *ProfilesData { + if !UseProtoPooling.IsEnabled() { + return &ProfilesData{} + } + return protoPoolProfilesData.Get().(*ProfilesData) +} + +func DeleteProfilesData(orig *ProfilesData, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.ResourceProfiles { + DeleteResourceProfiles(orig.ResourceProfiles[i], true) + } + DeleteProfilesDictionary(&orig.Dictionary, false) + orig.Reset() + if nullable { + protoPoolProfilesData.Put(orig) + } +} + +func CopyProfilesData(dest, src *ProfilesData) *ProfilesData { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewProfilesData() + } + dest.ResourceProfiles = CopyResourceProfilesPtrSlice(dest.ResourceProfiles, src.ResourceProfiles) + + CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary) + + return dest +} + +func CopyProfilesDataSlice(dest, src []ProfilesData) []ProfilesData { + var newDest []ProfilesData + if cap(dest) < len(src) { + newDest = make([]ProfilesData, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteProfilesData(&dest[i], false) + } + } + for i := range src { + CopyProfilesData(&newDest[i], &src[i]) + } + return newDest +} + +func CopyProfilesDataPtrSlice(dest, src []*ProfilesData) []*ProfilesData { + var newDest []*ProfilesData + if cap(dest) < len(src) { + newDest = make([]*ProfilesData, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(src). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewProfilesData() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ { + DeleteProfilesData(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewProfilesData() + } + } + for i := range src { + CopyProfilesData(newDest[i], src[i]) + } + return newDest +} + +func (orig *ProfilesData) Reset() { + *orig = ProfilesData{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ProfilesData) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.ResourceProfiles) > 0 { + dest.WriteObjectField("resourceProfiles") + dest.WriteArrayStart() + orig.ResourceProfiles[0].MarshalJSON(dest) + for i := 1; i < len(orig.ResourceProfiles); i++ { + dest.WriteMore() + orig.ResourceProfiles[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectField("dictionary") + orig.Dictionary.MarshalJSON(dest) + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ProfilesData) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resourceProfiles", "resource_profiles": + for iter.ReadArray() { + orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles()) + orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalJSON(iter) + } + + case "dictionary": + + orig.Dictionary.UnmarshalJSON(iter) + default: + iter.Skip() + } + } +} + +func (orig *ProfilesData) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.ResourceProfiles { + l = orig.ResourceProfiles[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + l = orig.Dictionary.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + return n +} + +func (orig *ProfilesData) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- { + l = orig.ResourceProfiles[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + l = orig.Dictionary.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + + return len(buf) - pos +} + +func (orig *ProfilesData) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles()) + err = orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Dictionary.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestProfilesData() *ProfilesData { + orig := NewProfilesData() + orig.ResourceProfiles = []*ResourceProfiles{{}, GenTestResourceProfiles()} + orig.Dictionary = *GenTestProfilesDictionary() + return orig +} + +func GenTestProfilesDataPtrSlice() []*ProfilesData { + orig := make([]*ProfilesData, 5) + orig[0] = NewProfilesData() + orig[1] = GenTestProfilesData() + orig[2] = NewProfilesData() + orig[3] = GenTestProfilesData() + orig[4] = NewProfilesData() + return orig +} + +func GenTestProfilesDataSlice() []ProfilesData { + orig := make([]ProfilesData, 5) + orig[1] = *GenTestProfilesData() + orig[3] = *GenTestProfilesData() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdictionary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go similarity index 50% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdictionary.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go index 671414494b2..8b8bef11c57 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdictionary.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go @@ -10,27 +10,37 @@ import ( "fmt" "sync" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// ProfilesDictionary is the reference table containing all data shared by profiles across the message being sent. 
+type ProfilesDictionary struct { + MappingTable []*Mapping + LocationTable []*Location + FunctionTable []*Function + LinkTable []*Link + StringTable []string + AttributeTable []*KeyValueAndUnit + StackTable []*Stack +} + var ( protoPoolProfilesDictionary = sync.Pool{ New: func() any { - return &otlpprofiles.ProfilesDictionary{} + return &ProfilesDictionary{} }, } ) -func NewOrigProfilesDictionary() *otlpprofiles.ProfilesDictionary { +func NewProfilesDictionary() *ProfilesDictionary { if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.ProfilesDictionary{} + return &ProfilesDictionary{} } - return protoPoolProfilesDictionary.Get().(*otlpprofiles.ProfilesDictionary) + return protoPoolProfilesDictionary.Get().(*ProfilesDictionary) } -func DeleteOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, nullable bool) { +func DeleteProfilesDictionary(orig *ProfilesDictionary, nullable bool) { if orig == nil { return } @@ -39,98 +49,153 @@ func DeleteOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, nullabl orig.Reset() return } - for i := range orig.MappingTable { - DeleteOrigMapping(orig.MappingTable[i], true) + DeleteMapping(orig.MappingTable[i], true) } for i := range orig.LocationTable { - DeleteOrigLocation(orig.LocationTable[i], true) + DeleteLocation(orig.LocationTable[i], true) } for i := range orig.FunctionTable { - DeleteOrigFunction(orig.FunctionTable[i], true) + DeleteFunction(orig.FunctionTable[i], true) } for i := range orig.LinkTable { - DeleteOrigLink(orig.LinkTable[i], true) + DeleteLink(orig.LinkTable[i], true) } + for i := range orig.AttributeTable { - DeleteOrigKeyValueAndUnit(orig.AttributeTable[i], true) + DeleteKeyValueAndUnit(orig.AttributeTable[i], true) } for i := range orig.StackTable { - DeleteOrigStack(orig.StackTable[i], true) + DeleteStack(orig.StackTable[i], true) } - orig.Reset() if nullable { protoPoolProfilesDictionary.Put(orig) } } -func CopyOrigProfilesDictionary(dest, src *otlpprofiles.ProfilesDictionary) { +func CopyProfilesDictionary(dest, src *ProfilesDictionary) *ProfilesDictionary { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewProfilesDictionary() } - dest.MappingTable = CopyOrigMappingSlice(dest.MappingTable, src.MappingTable) - dest.LocationTable = CopyOrigLocationSlice(dest.LocationTable, src.LocationTable) - dest.FunctionTable = CopyOrigFunctionSlice(dest.FunctionTable, src.FunctionTable) - dest.LinkTable = CopyOrigLinkSlice(dest.LinkTable, src.LinkTable) - dest.StringTable = CopyOrigStringSlice(dest.StringTable, src.StringTable) - dest.AttributeTable = CopyOrigKeyValueAndUnitSlice(dest.AttributeTable, src.AttributeTable) - dest.StackTable = CopyOrigStackSlice(dest.StackTable, src.StackTable) + dest.MappingTable = CopyMappingPtrSlice(dest.MappingTable, src.MappingTable) + + dest.LocationTable = CopyLocationPtrSlice(dest.LocationTable, src.LocationTable) + + dest.FunctionTable = CopyFunctionPtrSlice(dest.FunctionTable, src.FunctionTable) + + dest.LinkTable = CopyLinkPtrSlice(dest.LinkTable, src.LinkTable) + + dest.StringTable = append(dest.StringTable[:0], src.StringTable...) 
+ + dest.AttributeTable = CopyKeyValueAndUnitPtrSlice(dest.AttributeTable, src.AttributeTable) + + dest.StackTable = CopyStackPtrSlice(dest.StackTable, src.StackTable) + + return dest } -func GenTestOrigProfilesDictionary() *otlpprofiles.ProfilesDictionary { - orig := NewOrigProfilesDictionary() - orig.MappingTable = GenerateOrigTestMappingSlice() - orig.LocationTable = GenerateOrigTestLocationSlice() - orig.FunctionTable = GenerateOrigTestFunctionSlice() - orig.LinkTable = GenerateOrigTestLinkSlice() - orig.StringTable = GenerateOrigTestStringSlice() - orig.AttributeTable = GenerateOrigTestKeyValueAndUnitSlice() - orig.StackTable = GenerateOrigTestStackSlice() - return orig +func CopyProfilesDictionarySlice(dest, src []ProfilesDictionary) []ProfilesDictionary { + var newDest []ProfilesDictionary + if cap(dest) < len(src) { + newDest = make([]ProfilesDictionary, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteProfilesDictionary(&dest[i], false) + } + } + for i := range src { + CopyProfilesDictionary(&newDest[i], &src[i]) + } + return newDest +} + +func CopyProfilesDictionaryPtrSlice(dest, src []*ProfilesDictionary) []*ProfilesDictionary { + var newDest []*ProfilesDictionary + if cap(dest) < len(src) { + newDest = make([]*ProfilesDictionary, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(src). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewProfilesDictionary() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteProfilesDictionary(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewProfilesDictionary() + } + } + for i := range src { + CopyProfilesDictionary(newDest[i], src[i]) + } + return newDest +} + +func (orig *ProfilesDictionary) Reset() { + *orig = ProfilesDictionary{} } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, dest *json.Stream) { +// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ProfilesDictionary) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if len(orig.MappingTable) > 0 { dest.WriteObjectField("mappingTable") dest.WriteArrayStart() - MarshalJSONOrigMapping(orig.MappingTable[0], dest) + orig.MappingTable[0].MarshalJSON(dest) for i := 1; i < len(orig.MappingTable); i++ { dest.WriteMore() - MarshalJSONOrigMapping(orig.MappingTable[i], dest) + orig.MappingTable[i].MarshalJSON(dest) } dest.WriteArrayEnd() } if len(orig.LocationTable) > 0 { dest.WriteObjectField("locationTable") dest.WriteArrayStart() - MarshalJSONOrigLocation(orig.LocationTable[0], dest) + orig.LocationTable[0].MarshalJSON(dest) for i := 1; i < len(orig.LocationTable); i++ { dest.WriteMore() - MarshalJSONOrigLocation(orig.LocationTable[i], dest) + orig.LocationTable[i].MarshalJSON(dest) } dest.WriteArrayEnd() } if len(orig.FunctionTable) > 0 { dest.WriteObjectField("functionTable") dest.WriteArrayStart() - MarshalJSONOrigFunction(orig.FunctionTable[0], dest) + orig.FunctionTable[0].MarshalJSON(dest) for i := 1; i < len(orig.FunctionTable); i++ { dest.WriteMore() - MarshalJSONOrigFunction(orig.FunctionTable[i], dest) + orig.FunctionTable[i].MarshalJSON(dest) } dest.WriteArrayEnd() } if len(orig.LinkTable) > 0 { dest.WriteObjectField("linkTable") dest.WriteArrayStart() - MarshalJSONOrigLink(orig.LinkTable[0], dest) + orig.LinkTable[0].MarshalJSON(dest) for i := 1; i < len(orig.LinkTable); i++ { dest.WriteMore() - MarshalJSONOrigLink(orig.LinkTable[i], dest) + orig.LinkTable[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -144,55 +209,56 @@ func MarshalJSONOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, de } dest.WriteArrayEnd() } + if len(orig.AttributeTable) > 0 { dest.WriteObjectField("attributeTable") dest.WriteArrayStart() - MarshalJSONOrigKeyValueAndUnit(orig.AttributeTable[0], dest) + orig.AttributeTable[0].MarshalJSON(dest) for i := 1; i < len(orig.AttributeTable); i++ { dest.WriteMore() - MarshalJSONOrigKeyValueAndUnit(orig.AttributeTable[i], dest) + orig.AttributeTable[i].MarshalJSON(dest) } dest.WriteArrayEnd() } if len(orig.StackTable) > 0 { dest.WriteObjectField("stackTable") dest.WriteArrayStart() - MarshalJSONOrigStack(orig.StackTable[0], dest) + orig.StackTable[0].MarshalJSON(dest) for i := 1; i < len(orig.StackTable); i++ { dest.WriteMore() - MarshalJSONOrigStack(orig.StackTable[i], dest) + orig.StackTable[i].MarshalJSON(dest) } dest.WriteArrayEnd() } dest.WriteObjectEnd() } -// UnmarshalJSONOrigProfilesDictionary unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *ProfilesDictionary) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "mappingTable", "mapping_table": for iter.ReadArray() { - orig.MappingTable = append(orig.MappingTable, NewOrigMapping()) - UnmarshalJSONOrigMapping(orig.MappingTable[len(orig.MappingTable)-1], iter) + orig.MappingTable = append(orig.MappingTable, NewMapping()) + orig.MappingTable[len(orig.MappingTable)-1].UnmarshalJSON(iter) } case "locationTable", "location_table": for iter.ReadArray() { - orig.LocationTable = append(orig.LocationTable, NewOrigLocation()) - UnmarshalJSONOrigLocation(orig.LocationTable[len(orig.LocationTable)-1], iter) + orig.LocationTable = append(orig.LocationTable, NewLocation()) + orig.LocationTable[len(orig.LocationTable)-1].UnmarshalJSON(iter) } case "functionTable", "function_table": for iter.ReadArray() { - orig.FunctionTable = append(orig.FunctionTable, NewOrigFunction()) - UnmarshalJSONOrigFunction(orig.FunctionTable[len(orig.FunctionTable)-1], iter) + orig.FunctionTable = append(orig.FunctionTable, NewFunction()) + orig.FunctionTable[len(orig.FunctionTable)-1].UnmarshalJSON(iter) } case "linkTable", "link_table": for iter.ReadArray() { - orig.LinkTable = append(orig.LinkTable, NewOrigLink()) - UnmarshalJSONOrigLink(orig.LinkTable[len(orig.LinkTable)-1], iter) + orig.LinkTable = append(orig.LinkTable, NewLink()) + orig.LinkTable[len(orig.LinkTable)-1].UnmarshalJSON(iter) } case "stringTable", "string_table": @@ -202,14 +268,14 @@ func UnmarshalJSONOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, case "attributeTable", "attribute_table": for iter.ReadArray() { - orig.AttributeTable = append(orig.AttributeTable, NewOrigKeyValueAndUnit()) - UnmarshalJSONOrigKeyValueAndUnit(orig.AttributeTable[len(orig.AttributeTable)-1], iter) + orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit()) + orig.AttributeTable[len(orig.AttributeTable)-1].UnmarshalJSON(iter) } case "stackTable", "stack_table": for iter.ReadArray() { - orig.StackTable = append(orig.StackTable, NewOrigStack()) - UnmarshalJSONOrigStack(orig.StackTable[len(orig.StackTable)-1], iter) + orig.StackTable = append(orig.StackTable, NewStack()) + orig.StackTable[len(orig.StackTable)-1].UnmarshalJSON(iter) } default: @@ -218,24 +284,24 @@ func UnmarshalJSONOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, } } -func SizeProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary) int { +func (orig *ProfilesDictionary) SizeProto() int { var n int var l int _ = l for i := range orig.MappingTable { - l = SizeProtoOrigMapping(orig.MappingTable[i]) + l = orig.MappingTable[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } for i := range orig.LocationTable { - l = SizeProtoOrigLocation(orig.LocationTable[i]) + l = orig.LocationTable[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } for i := range orig.FunctionTable { - l = SizeProtoOrigFunction(orig.FunctionTable[i]) + l = orig.FunctionTable[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } for i := range orig.LinkTable { - l = SizeProtoOrigLink(orig.LinkTable[i]) + l = orig.LinkTable[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } for _, s := range orig.StringTable { @@ -243,43 +309,43 @@ func SizeProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary) int n += 1 + proto.Sov(uint64(l)) + l } for i := range orig.AttributeTable { - l = SizeProtoOrigKeyValueAndUnit(orig.AttributeTable[i]) + l = orig.AttributeTable[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } for 
i := range orig.StackTable { - l = SizeProtoOrigStack(orig.StackTable[i]) + l = orig.StackTable[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } return n } -func MarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, buf []byte) int { +func (orig *ProfilesDictionary) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l for i := len(orig.MappingTable) - 1; i >= 0; i-- { - l = MarshalProtoOrigMapping(orig.MappingTable[i], buf[:pos]) + l = orig.MappingTable[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0xa } for i := len(orig.LocationTable) - 1; i >= 0; i-- { - l = MarshalProtoOrigLocation(orig.LocationTable[i], buf[:pos]) + l = orig.LocationTable[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x12 } for i := len(orig.FunctionTable) - 1; i >= 0; i-- { - l = MarshalProtoOrigFunction(orig.FunctionTable[i], buf[:pos]) + l = orig.FunctionTable[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x1a } for i := len(orig.LinkTable) - 1; i >= 0; i-- { - l = MarshalProtoOrigLink(orig.LinkTable[i], buf[:pos]) + l = orig.LinkTable[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- @@ -294,14 +360,14 @@ func MarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, b buf[pos] = 0x2a } for i := len(orig.AttributeTable) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValueAndUnit(orig.AttributeTable[i], buf[:pos]) + l = orig.AttributeTable[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x32 } for i := len(orig.StackTable) - 1; i >= 0; i-- { - l = MarshalProtoOrigStack(orig.StackTable[i], buf[:pos]) + l = orig.StackTable[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- @@ -310,7 +376,7 @@ func MarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, b return len(buf) - pos } -func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, buf []byte) error { +func (orig *ProfilesDictionary) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -335,8 +401,8 @@ func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, return err } startPos := pos - length - orig.MappingTable = append(orig.MappingTable, NewOrigMapping()) - err = UnmarshalProtoOrigMapping(orig.MappingTable[len(orig.MappingTable)-1], buf[startPos:pos]) + orig.MappingTable = append(orig.MappingTable, NewMapping()) + err = orig.MappingTable[len(orig.MappingTable)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -351,8 +417,8 @@ func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, return err } startPos := pos - length - orig.LocationTable = append(orig.LocationTable, NewOrigLocation()) - err = UnmarshalProtoOrigLocation(orig.LocationTable[len(orig.LocationTable)-1], buf[startPos:pos]) + orig.LocationTable = append(orig.LocationTable, NewLocation()) + err = orig.LocationTable[len(orig.LocationTable)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -367,8 +433,8 @@ func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, return err } startPos := pos - length - orig.FunctionTable = append(orig.FunctionTable, NewOrigFunction()) - err = UnmarshalProtoOrigFunction(orig.FunctionTable[len(orig.FunctionTable)-1], buf[startPos:pos]) + 
orig.FunctionTable = append(orig.FunctionTable, NewFunction()) + err = orig.FunctionTable[len(orig.FunctionTable)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -383,8 +449,8 @@ func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, return err } startPos := pos - length - orig.LinkTable = append(orig.LinkTable, NewOrigLink()) - err = UnmarshalProtoOrigLink(orig.LinkTable[len(orig.LinkTable)-1], buf[startPos:pos]) + orig.LinkTable = append(orig.LinkTable, NewLink()) + err = orig.LinkTable[len(orig.LinkTable)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -411,8 +477,8 @@ func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, return err } startPos := pos - length - orig.AttributeTable = append(orig.AttributeTable, NewOrigKeyValueAndUnit()) - err = UnmarshalProtoOrigKeyValueAndUnit(orig.AttributeTable[len(orig.AttributeTable)-1], buf[startPos:pos]) + orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit()) + err = orig.AttributeTable[len(orig.AttributeTable)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -427,8 +493,8 @@ func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, return err } startPos := pos - length - orig.StackTable = append(orig.StackTable, NewOrigStack()) - err = UnmarshalProtoOrigStack(orig.StackTable[len(orig.StackTable)-1], buf[startPos:pos]) + orig.StackTable = append(orig.StackTable, NewStack()) + err = orig.StackTable[len(orig.StackTable)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -441,3 +507,32 @@ func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, } return nil } + +func GenTestProfilesDictionary() *ProfilesDictionary { + orig := NewProfilesDictionary() + orig.MappingTable = []*Mapping{{}, GenTestMapping()} + orig.LocationTable = []*Location{{}, GenTestLocation()} + orig.FunctionTable = []*Function{{}, GenTestFunction()} + orig.LinkTable = []*Link{{}, GenTestLink()} + orig.StringTable = []string{"", "test_stringtable"} + orig.AttributeTable = []*KeyValueAndUnit{{}, GenTestKeyValueAndUnit()} + orig.StackTable = []*Stack{{}, GenTestStack()} + return orig +} + +func GenTestProfilesDictionaryPtrSlice() []*ProfilesDictionary { + orig := make([]*ProfilesDictionary, 5) + orig[0] = NewProfilesDictionary() + orig[1] = GenTestProfilesDictionary() + orig[2] = NewProfilesDictionary() + orig[3] = GenTestProfilesDictionary() + orig[4] = NewProfilesDictionary() + return orig +} + +func GenTestProfilesDictionarySlice() []ProfilesDictionary { + orig := make([]ProfilesDictionary, 5) + orig[1] = *GenTestProfilesDictionary() + orig[3] = *GenTestProfilesDictionary() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go new file mode 100644 index 00000000000..b8956807f7d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go @@ -0,0 +1,299 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package internal + +import ( + "encoding/binary" + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +type ProfilesRequest struct { + RequestContext *RequestContext + ProfilesData ProfilesData + FormatVersion uint32 +} + +var ( + protoPoolProfilesRequest = sync.Pool{ + New: func() any { + return &ProfilesRequest{} + }, + } +) + +func NewProfilesRequest() *ProfilesRequest { + if !UseProtoPooling.IsEnabled() { + return &ProfilesRequest{} + } + return protoPoolProfilesRequest.Get().(*ProfilesRequest) +} + +func DeleteProfilesRequest(orig *ProfilesRequest, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteRequestContext(orig.RequestContext, true) + DeleteProfilesData(&orig.ProfilesData, false) + + orig.Reset() + if nullable { + protoPoolProfilesRequest.Put(orig) + } +} + +func CopyProfilesRequest(dest, src *ProfilesRequest) *ProfilesRequest { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewProfilesRequest() + } + dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext) + + CopyProfilesData(&dest.ProfilesData, &src.ProfilesData) + + dest.FormatVersion = src.FormatVersion + + return dest +} + +func CopyProfilesRequestSlice(dest, src []ProfilesRequest) []ProfilesRequest { + var newDest []ProfilesRequest + if cap(dest) < len(src) { + newDest = make([]ProfilesRequest, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteProfilesRequest(&dest[i], false) + } + } + for i := range src { + CopyProfilesRequest(&newDest[i], &src[i]) + } + return newDest +} + +func CopyProfilesRequestPtrSlice(dest, src []*ProfilesRequest) []*ProfilesRequest { + var newDest []*ProfilesRequest + if cap(dest) < len(src) { + newDest = make([]*ProfilesRequest, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(src). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewProfilesRequest() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteProfilesRequest(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewProfilesRequest() + } + } + for i := range src { + CopyProfilesRequest(newDest[i], src[i]) + } + return newDest +} + +func (orig *ProfilesRequest) Reset() { + *orig = ProfilesRequest{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ProfilesRequest) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.RequestContext != nil { + dest.WriteObjectField("requestContext") + orig.RequestContext.MarshalJSON(dest) + } + dest.WriteObjectField("profilesData") + orig.ProfilesData.MarshalJSON(dest) + if orig.FormatVersion != uint32(0) { + dest.WriteObjectField("formatVersion") + dest.WriteUint32(orig.FormatVersion) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ProfilesRequest) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "requestContext", "request_context": + orig.RequestContext = NewRequestContext() + orig.RequestContext.UnmarshalJSON(iter) + case "profilesData", "profiles_data": + + orig.ProfilesData.UnmarshalJSON(iter) + case "formatVersion", "format_version": + orig.FormatVersion = iter.ReadUint32() + default: + iter.Skip() + } + } +} + +func (orig *ProfilesRequest) SizeProto() int { + var n int + var l int + _ = l + if orig.RequestContext != nil { + l = orig.RequestContext.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + l = orig.ProfilesData.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + if orig.FormatVersion != uint32(0) { + n += 5 + } + return n +} + +func (orig *ProfilesRequest) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.RequestContext != nil { + l = orig.RequestContext.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = orig.ProfilesData.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + + if orig.FormatVersion != uint32(0) { + pos -= 4 + binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion)) + pos-- + buf[pos] = 0xd + } + return len(buf) - pos +} + +func (orig *ProfilesRequest) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
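+ // Each tag is a varint of (fieldNumber << 3) | wireType. FormatVersion, for
+ // example, is field 1 with wire type I32 (5), i.e. the single tag byte 0x0d
+ // followed by four little-endian bytes, matching MarshalProto above.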
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + orig.RequestContext = NewRequestContext() + err = orig.RequestContext.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ProfilesData", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.ProfilesData.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 1: + if wireType != proto.WireTypeI32 { + return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType) + } + var num uint32 + num, pos, err = proto.ConsumeI32(buf, pos) + if err != nil { + return err + } + + orig.FormatVersion = uint32(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestProfilesRequest() *ProfilesRequest { + orig := NewProfilesRequest() + orig.RequestContext = GenTestRequestContext() + orig.ProfilesData = *GenTestProfilesData() + orig.FormatVersion = uint32(13) + return orig +} + +func GenTestProfilesRequestPtrSlice() []*ProfilesRequest { + orig := make([]*ProfilesRequest, 5) + orig[0] = NewProfilesRequest() + orig[1] = GenTestProfilesRequest() + orig[2] = NewProfilesRequest() + orig[3] = GenTestProfilesRequest() + orig[4] = NewProfilesRequest() + return orig +} + +func GenTestProfilesRequestSlice() []ProfilesRequest { + orig := make([]ProfilesRequest, 5) + orig[1] = *GenTestProfilesRequest() + orig[3] = *GenTestProfilesRequest() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go new file mode 100644 index 00000000000..eab9a270bd1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go @@ -0,0 +1,649 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
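+// RequestContext models the proto oneof client_address as the Go interface
+// field ClientAddress plus one small wrapper struct per variant
+// (RequestContext_IP, _TCP, _UDP, _Unix); each wrapper has its own sync.Pool
+// so oneof values can be recycled independently when pooling is enabled.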
+ +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +func (m *RequestContext) GetClientAddress() any { + if m != nil { + return m.ClientAddress + } + return nil +} + +type RequestContext_IP struct { + IP *IPAddr +} + +func (m *RequestContext) GetIP() *IPAddr { + if v, ok := m.GetClientAddress().(*RequestContext_IP); ok { + return v.IP + } + return nil +} + +type RequestContext_TCP struct { + TCP *TCPAddr +} + +func (m *RequestContext) GetTCP() *TCPAddr { + if v, ok := m.GetClientAddress().(*RequestContext_TCP); ok { + return v.TCP + } + return nil +} + +type RequestContext_UDP struct { + UDP *UDPAddr +} + +func (m *RequestContext) GetUDP() *UDPAddr { + if v, ok := m.GetClientAddress().(*RequestContext_UDP); ok { + return v.UDP + } + return nil +} + +type RequestContext_Unix struct { + Unix *UnixAddr +} + +func (m *RequestContext) GetUnix() *UnixAddr { + if v, ok := m.GetClientAddress().(*RequestContext_Unix); ok { + return v.Unix + } + return nil +} + +type RequestContext struct { + ClientAddress any + SpanContext *SpanContext + ClientMetadata []KeyValue +} + +var ( + protoPoolRequestContext = sync.Pool{ + New: func() any { + return &RequestContext{} + }, + } + + ProtoPoolRequestContext_IP = sync.Pool{ + New: func() any { + return &RequestContext_IP{} + }, + } + + ProtoPoolRequestContext_TCP = sync.Pool{ + New: func() any { + return &RequestContext_TCP{} + }, + } + + ProtoPoolRequestContext_UDP = sync.Pool{ + New: func() any { + return &RequestContext_UDP{} + }, + } + + ProtoPoolRequestContext_Unix = sync.Pool{ + New: func() any { + return &RequestContext_Unix{} + }, + } +) + +func NewRequestContext() *RequestContext { + if !UseProtoPooling.IsEnabled() { + return &RequestContext{} + } + return protoPoolRequestContext.Get().(*RequestContext) +} + +func DeleteRequestContext(orig *RequestContext, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteSpanContext(orig.SpanContext, true) + for i := range orig.ClientMetadata { + DeleteKeyValue(&orig.ClientMetadata[i], false) + } + switch ov := orig.ClientAddress.(type) { + case *RequestContext_IP: + DeleteIPAddr(ov.IP, true) + ov.IP = nil + ProtoPoolRequestContext_IP.Put(ov) + case *RequestContext_TCP: + DeleteTCPAddr(ov.TCP, true) + ov.TCP = nil + ProtoPoolRequestContext_TCP.Put(ov) + case *RequestContext_UDP: + DeleteUDPAddr(ov.UDP, true) + ov.UDP = nil + ProtoPoolRequestContext_UDP.Put(ov) + case *RequestContext_Unix: + DeleteUnixAddr(ov.Unix, true) + ov.Unix = nil + ProtoPoolRequestContext_Unix.Put(ov) + } + orig.Reset() + if nullable { + protoPoolRequestContext.Put(orig) + } +} + +func CopyRequestContext(dest, src *RequestContext) *RequestContext { + // If copying to same object, just return. 
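+ // Contract: returns dest with src deep-copied into it, reusing dest's
+ // existing allocations where possible. A nil dest is replaced by a new
+ // (possibly pooled) object, and a nil src yields a nil result.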
+ if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewRequestContext() + } + dest.SpanContext = CopySpanContext(dest.SpanContext, src.SpanContext) + + dest.ClientMetadata = CopyKeyValueSlice(dest.ClientMetadata, src.ClientMetadata) + + switch t := src.ClientAddress.(type) { + case *RequestContext_IP: + var ov *RequestContext_IP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_IP{} + } else { + ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP) + } + ov.IP = NewIPAddr() + CopyIPAddr(ov.IP, t.IP) + dest.ClientAddress = ov + + case *RequestContext_TCP: + var ov *RequestContext_TCP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_TCP{} + } else { + ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP) + } + ov.TCP = NewTCPAddr() + CopyTCPAddr(ov.TCP, t.TCP) + dest.ClientAddress = ov + + case *RequestContext_UDP: + var ov *RequestContext_UDP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_UDP{} + } else { + ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP) + } + ov.UDP = NewUDPAddr() + CopyUDPAddr(ov.UDP, t.UDP) + dest.ClientAddress = ov + + case *RequestContext_Unix: + var ov *RequestContext_Unix + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_Unix{} + } else { + ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix) + } + ov.Unix = NewUnixAddr() + CopyUnixAddr(ov.Unix, t.Unix) + dest.ClientAddress = ov + + default: + dest.ClientAddress = nil + } + + return dest +} + +func CopyRequestContextSlice(dest, src []RequestContext) []RequestContext { + var newDest []RequestContext + if cap(dest) < len(src) { + newDest = make([]RequestContext, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteRequestContext(&dest[i], false) + } + } + for i := range src { + CopyRequestContext(&newDest[i], &src[i]) + } + return newDest +} + +func CopyRequestContextPtrSlice(dest, src []*RequestContext) []*RequestContext { + var newDest []*RequestContext + if cap(dest) < len(src) { + newDest = make([]*RequestContext, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewRequestContext() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteRequestContext(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewRequestContext() + } + } + for i := range src { + CopyRequestContext(newDest[i], src[i]) + } + return newDest +} + +func (orig *RequestContext) Reset() { + *orig = RequestContext{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
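+// At most one client_address variant is serialized, keyed by the wrapper type.
+// The keys are derived mechanically from the field names, which is why they
+// surface as "iP", "tCP" and "uDP" rather than "ip"/"tcp"/"udp"; UnmarshalJSON
+// below matches the same spellings.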
+func (orig *RequestContext) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.SpanContext != nil { + dest.WriteObjectField("spanContext") + orig.SpanContext.MarshalJSON(dest) + } + if len(orig.ClientMetadata) > 0 { + dest.WriteObjectField("clientMetadata") + dest.WriteArrayStart() + orig.ClientMetadata[0].MarshalJSON(dest) + for i := 1; i < len(orig.ClientMetadata); i++ { + dest.WriteMore() + orig.ClientMetadata[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + switch orig := orig.ClientAddress.(type) { + case *RequestContext_IP: + if orig.IP != nil { + dest.WriteObjectField("iP") + orig.IP.MarshalJSON(dest) + } + case *RequestContext_TCP: + if orig.TCP != nil { + dest.WriteObjectField("tCP") + orig.TCP.MarshalJSON(dest) + } + case *RequestContext_UDP: + if orig.UDP != nil { + dest.WriteObjectField("uDP") + orig.UDP.MarshalJSON(dest) + } + case *RequestContext_Unix: + if orig.Unix != nil { + dest.WriteObjectField("unix") + orig.Unix.MarshalJSON(dest) + } + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *RequestContext) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "spanContext", "span_context": + orig.SpanContext = NewSpanContext() + orig.SpanContext.UnmarshalJSON(iter) + case "clientMetadata", "client_metadata": + for iter.ReadArray() { + orig.ClientMetadata = append(orig.ClientMetadata, KeyValue{}) + orig.ClientMetadata[len(orig.ClientMetadata)-1].UnmarshalJSON(iter) + } + + case "iP": + { + var ov *RequestContext_IP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_IP{} + } else { + ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP) + } + ov.IP = NewIPAddr() + ov.IP.UnmarshalJSON(iter) + orig.ClientAddress = ov + } + case "tCP": + { + var ov *RequestContext_TCP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_TCP{} + } else { + ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP) + } + ov.TCP = NewTCPAddr() + ov.TCP.UnmarshalJSON(iter) + orig.ClientAddress = ov + } + case "uDP": + { + var ov *RequestContext_UDP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_UDP{} + } else { + ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP) + } + ov.UDP = NewUDPAddr() + ov.UDP.UnmarshalJSON(iter) + orig.ClientAddress = ov + } + case "unix": + { + var ov *RequestContext_Unix + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_Unix{} + } else { + ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix) + } + ov.Unix = NewUnixAddr() + ov.Unix.UnmarshalJSON(iter) + orig.ClientAddress = ov + } + + default: + iter.Skip() + } + } +} + +func (orig *RequestContext) SizeProto() int { + var n int + var l int + _ = l + if orig.SpanContext != nil { + l = orig.SpanContext.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + for i := range orig.ClientMetadata { + l = orig.ClientMetadata[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + switch orig := orig.ClientAddress.(type) { + case nil: + _ = orig + break + case *RequestContext_IP: + if orig.IP != nil { + l = orig.IP.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *RequestContext_TCP: + if orig.TCP != nil { + l = orig.TCP.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *RequestContext_UDP: + if orig.UDP != nil { + l = orig.UDP.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + case *RequestContext_Unix: + if orig.Unix != nil { + l = orig.Unix.SizeProto() + n += 1 + 
proto.Sov(uint64(l)) + l + } + } + return n +} + +func (orig *RequestContext) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.SpanContext != nil { + l = orig.SpanContext.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + for i := len(orig.ClientMetadata) - 1; i >= 0; i-- { + l = orig.ClientMetadata[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + switch orig := orig.ClientAddress.(type) { + case *RequestContext_IP: + if orig.IP != nil { + l = orig.IP.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + case *RequestContext_TCP: + if orig.TCP != nil { + l = orig.TCP.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x22 + } + case *RequestContext_UDP: + if orig.UDP != nil { + l = orig.UDP.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x2a + } + case *RequestContext_Unix: + if orig.Unix != nil { + l = orig.Unix.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x32 + } + } + return len(buf) - pos +} + +func (orig *RequestContext) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SpanContext", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + orig.SpanContext = NewSpanContext() + err = orig.SpanContext.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ClientMetadata", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ClientMetadata = append(orig.ClientMetadata, KeyValue{}) + err = orig.ClientMetadata[len(orig.ClientMetadata)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *RequestContext_IP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_IP{} + } else { + ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP) + } + ov.IP = NewIPAddr() + err = ov.IP.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.ClientAddress = ov + + case 4: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *RequestContext_TCP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_TCP{} + } else { + ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP) + } + ov.TCP = NewTCPAddr() + err = ov.TCP.UnmarshalProto(buf[startPos:pos]) + if err 
!= nil { + return err + } + orig.ClientAddress = ov + + case 5: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field UDP", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *RequestContext_UDP + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_UDP{} + } else { + ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP) + } + ov.UDP = NewUDPAddr() + err = ov.UDP.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.ClientAddress = ov + + case 6: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Unix", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + var ov *RequestContext_Unix + if !UseProtoPooling.IsEnabled() { + ov = &RequestContext_Unix{} + } else { + ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix) + } + ov.Unix = NewUnixAddr() + err = ov.Unix.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + orig.ClientAddress = ov + + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestRequestContext() *RequestContext { + orig := NewRequestContext() + orig.SpanContext = GenTestSpanContext() + orig.ClientMetadata = []KeyValue{{}, *GenTestKeyValue()} + orig.ClientAddress = &RequestContext_IP{IP: GenTestIPAddr()} + return orig +} + +func GenTestRequestContextPtrSlice() []*RequestContext { + orig := make([]*RequestContext, 5) + orig[0] = NewRequestContext() + orig[1] = GenTestRequestContext() + orig[2] = NewRequestContext() + orig[3] = GenTestRequestContext() + orig[4] = NewRequestContext() + return orig +} + +func GenTestRequestContextSlice() []RequestContext { + orig := make([]RequestContext, 5) + orig[1] = *GenTestRequestContext() + orig[3] = *GenTestRequestContext() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go new file mode 100644 index 00000000000..0b846c62b56 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go @@ -0,0 +1,322 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Resource is a message representing the resource information. 
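+// Wire layout, as implemented by the methods below: attributes = field 1 (LEN),
+// dropped_attributes_count = field 2 (VARINT), entity_refs = field 3 (LEN).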
+type Resource struct { + Attributes []KeyValue + EntityRefs []*EntityRef + DroppedAttributesCount uint32 +} + +var ( + protoPoolResource = sync.Pool{ + New: func() any { + return &Resource{} + }, + } +) + +func NewResource() *Resource { + if !UseProtoPooling.IsEnabled() { + return &Resource{} + } + return protoPoolResource.Get().(*Resource) +} + +func DeleteResource(orig *Resource, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.Attributes { + DeleteKeyValue(&orig.Attributes[i], false) + } + + for i := range orig.EntityRefs { + DeleteEntityRef(orig.EntityRefs[i], true) + } + orig.Reset() + if nullable { + protoPoolResource.Put(orig) + } +} + +func CopyResource(dest, src *Resource) *Resource { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewResource() + } + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + + dest.DroppedAttributesCount = src.DroppedAttributesCount + dest.EntityRefs = CopyEntityRefPtrSlice(dest.EntityRefs, src.EntityRefs) + + return dest +} + +func CopyResourceSlice(dest, src []Resource) []Resource { + var newDest []Resource + if cap(dest) < len(src) { + newDest = make([]Resource, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResource(&dest[i], false) + } + } + for i := range src { + CopyResource(&newDest[i], &src[i]) + } + return newDest +} + +func CopyResourcePtrSlice(dest, src []*Resource) []*Resource { + var newDest []*Resource + if cap(dest) < len(src) { + newDest = make([]*Resource, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResource() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResource(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResource() + } + } + for i := range src { + CopyResource(newDest[i], src[i]) + } + return newDest +} + +func (orig *Resource) Reset() { + *orig = Resource{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
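+// Non-empty slices are emitted by writing the first element and prefixing each
+// subsequent one with WriteMore, which avoids trailing-comma bookkeeping;
+// empty slices and a zero DroppedAttributesCount are omitted entirely.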
+func (orig *Resource) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.Attributes) > 0 { + dest.WriteObjectField("attributes") + dest.WriteArrayStart() + orig.Attributes[0].MarshalJSON(dest) + for i := 1; i < len(orig.Attributes); i++ { + dest.WriteMore() + orig.Attributes[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.DroppedAttributesCount != uint32(0) { + dest.WriteObjectField("droppedAttributesCount") + dest.WriteUint32(orig.DroppedAttributesCount) + } + if len(orig.EntityRefs) > 0 { + dest.WriteObjectField("entityRefs") + dest.WriteArrayStart() + orig.EntityRefs[0].MarshalJSON(dest) + for i := 1; i < len(orig.EntityRefs); i++ { + dest.WriteMore() + orig.EntityRefs[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Resource) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "attributes": + for iter.ReadArray() { + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) + } + + case "droppedAttributesCount", "dropped_attributes_count": + orig.DroppedAttributesCount = iter.ReadUint32() + case "entityRefs", "entity_refs": + for iter.ReadArray() { + orig.EntityRefs = append(orig.EntityRefs, NewEntityRef()) + orig.EntityRefs[len(orig.EntityRefs)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *Resource) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.Attributes { + l = orig.Attributes[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.DroppedAttributesCount != uint32(0) { + n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount)) + } + for i := range orig.EntityRefs { + l = orig.EntityRefs[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *Resource) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.Attributes) - 1; i >= 0; i-- { + l = orig.Attributes[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + if orig.DroppedAttributesCount != uint32(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount)) + pos-- + buf[pos] = 0x10 + } + for i := len(orig.EntityRefs) - 1; i >= 0; i-- { + l = orig.EntityRefs[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + return len(buf) - pos +} + +func (orig *Resource) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
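+ // Protobuf imposes no field ordering, so the switch below must accept tags
+ // in any sequence; each occurrence of a repeated field (attributes,
+ // entity_refs) appends exactly one element.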
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.DroppedAttributesCount = uint32(num) + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.EntityRefs = append(orig.EntityRefs, NewEntityRef()) + err = orig.EntityRefs[len(orig.EntityRefs)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestResource() *Resource { + orig := NewResource() + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.DroppedAttributesCount = uint32(13) + orig.EntityRefs = []*EntityRef{{}, GenTestEntityRef()} + return orig +} + +func GenTestResourcePtrSlice() []*Resource { + orig := make([]*Resource, 5) + orig[0] = NewResource() + orig[1] = GenTestResource() + orig[2] = NewResource() + orig[3] = GenTestResource() + orig[4] = NewResource() + return orig +} + +func GenTestResourceSlice() []Resource { + orig := make([]Resource, 5) + orig[1] = *GenTestResource() + orig[3] = *GenTestResource() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go new file mode 100644 index 00000000000..f7f64566672 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go @@ -0,0 +1,364 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ResourceLogs is a collection of logs from a Resource. 
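+// Note the deprecated_scope_logs list is proto field 1000: its key
+// (1000<<3 | 2 = 8002) no longer fits in one varint byte, so MarshalProto
+// emits the two tag bytes 0xc2 0x3e and SizeProto charges 2 bytes per entry
+// instead of 1.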
+type ResourceLogs struct { + Resource Resource + ScopeLogs []*ScopeLogs + SchemaUrl string + DeprecatedScopeLogs []*ScopeLogs +} + +var ( + protoPoolResourceLogs = sync.Pool{ + New: func() any { + return &ResourceLogs{} + }, + } +) + +func NewResourceLogs() *ResourceLogs { + if !UseProtoPooling.IsEnabled() { + return &ResourceLogs{} + } + return protoPoolResourceLogs.Get().(*ResourceLogs) +} + +func DeleteResourceLogs(orig *ResourceLogs, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteResource(&orig.Resource, false) + for i := range orig.ScopeLogs { + DeleteScopeLogs(orig.ScopeLogs[i], true) + } + + for i := range orig.DeprecatedScopeLogs { + DeleteScopeLogs(orig.DeprecatedScopeLogs[i], true) + } + orig.Reset() + if nullable { + protoPoolResourceLogs.Put(orig) + } +} + +func CopyResourceLogs(dest, src *ResourceLogs) *ResourceLogs { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewResourceLogs() + } + CopyResource(&dest.Resource, &src.Resource) + + dest.ScopeLogs = CopyScopeLogsPtrSlice(dest.ScopeLogs, src.ScopeLogs) + + dest.SchemaUrl = src.SchemaUrl + dest.DeprecatedScopeLogs = CopyScopeLogsPtrSlice(dest.DeprecatedScopeLogs, src.DeprecatedScopeLogs) + + return dest +} + +func CopyResourceLogsSlice(dest, src []ResourceLogs) []ResourceLogs { + var newDest []ResourceLogs + if cap(dest) < len(src) { + newDest = make([]ResourceLogs, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResourceLogs(&dest[i], false) + } + } + for i := range src { + CopyResourceLogs(&newDest[i], &src[i]) + } + return newDest +} + +func CopyResourceLogsPtrSlice(dest, src []*ResourceLogs) []*ResourceLogs { + var newDest []*ResourceLogs + if cap(dest) < len(src) { + newDest = make([]*ResourceLogs, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResourceLogs() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResourceLogs(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResourceLogs() + } + } + for i := range src { + CopyResourceLogs(newDest[i], src[i]) + } + return newDest +} + +func (orig *ResourceLogs) Reset() { + *orig = ResourceLogs{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ResourceLogs) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("resource") + orig.Resource.MarshalJSON(dest) + if len(orig.ScopeLogs) > 0 { + dest.WriteObjectField("scopeLogs") + dest.WriteArrayStart() + orig.ScopeLogs[0].MarshalJSON(dest) + for i := 1; i < len(orig.ScopeLogs); i++ { + dest.WriteMore() + orig.ScopeLogs[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + if len(orig.DeprecatedScopeLogs) > 0 { + dest.WriteObjectField("deprecatedScopeLogs") + dest.WriteArrayStart() + orig.DeprecatedScopeLogs[0].MarshalJSON(dest) + for i := 1; i < len(orig.DeprecatedScopeLogs); i++ { + dest.WriteMore() + orig.DeprecatedScopeLogs[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ResourceLogs) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resource": + + orig.Resource.UnmarshalJSON(iter) + case "scopeLogs", "scope_logs": + for iter.ReadArray() { + orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs()) + orig.ScopeLogs[len(orig.ScopeLogs)-1].UnmarshalJSON(iter) + } + + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + case "deprecatedScopeLogs", "deprecated_scope_logs": + for iter.ReadArray() { + orig.DeprecatedScopeLogs = append(orig.DeprecatedScopeLogs, NewScopeLogs()) + orig.DeprecatedScopeLogs[len(orig.DeprecatedScopeLogs)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *ResourceLogs) SizeProto() int { + var n int + var l int + _ = l + l = orig.Resource.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + for i := range orig.ScopeLogs { + l = orig.ScopeLogs[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + for i := range orig.DeprecatedScopeLogs { + l = orig.DeprecatedScopeLogs[i].SizeProto() + n += 2 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ResourceLogs) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.Resource.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + for i := len(orig.ScopeLogs) - 1; i >= 0; i-- { + l = orig.ScopeLogs[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + for i := len(orig.DeprecatedScopeLogs) - 1; i >= 0; i-- { + l = orig.DeprecatedScopeLogs[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x3e + pos-- + buf[pos] = 0xc2 + } + return len(buf) - pos +} + +func (orig *ResourceLogs) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
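+ // Unrecognized field numbers fall through to proto.ConsumeUnknown, which
+ // skips the value according to its wire type; unknown data is discarded
+ // rather than preserved for re-encoding.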
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Resource.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs()) + err = orig.ScopeLogs[len(orig.ScopeLogs)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + + case 1000: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeLogs", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DeprecatedScopeLogs = append(orig.DeprecatedScopeLogs, NewScopeLogs()) + err = orig.DeprecatedScopeLogs[len(orig.DeprecatedScopeLogs)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestResourceLogs() *ResourceLogs { + orig := NewResourceLogs() + orig.Resource = *GenTestResource() + orig.ScopeLogs = []*ScopeLogs{{}, GenTestScopeLogs()} + orig.SchemaUrl = "test_schemaurl" + orig.DeprecatedScopeLogs = []*ScopeLogs{{}, GenTestScopeLogs()} + return orig +} + +func GenTestResourceLogsPtrSlice() []*ResourceLogs { + orig := make([]*ResourceLogs, 5) + orig[0] = NewResourceLogs() + orig[1] = GenTestResourceLogs() + orig[2] = NewResourceLogs() + orig[3] = GenTestResourceLogs() + orig[4] = NewResourceLogs() + return orig +} + +func GenTestResourceLogsSlice() []ResourceLogs { + orig := make([]ResourceLogs, 5) + orig[1] = *GenTestResourceLogs() + orig[3] = *GenTestResourceLogs() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go new file mode 100644 index 00000000000..dc61ce32cbd --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go @@ -0,0 +1,364 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ResourceMetrics is a collection of metrics from a Resource. 
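+// Encoding note: MarshalProto fills the buffer from the end toward the front,
+// writing each submessage before its varint length prefix, so all lengths are
+// known without a second sizing pass; callers are expected to size the buffer
+// with SizeProto first.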
+type ResourceMetrics struct { + Resource Resource + ScopeMetrics []*ScopeMetrics + SchemaUrl string + DeprecatedScopeMetrics []*ScopeMetrics +} + +var ( + protoPoolResourceMetrics = sync.Pool{ + New: func() any { + return &ResourceMetrics{} + }, + } +) + +func NewResourceMetrics() *ResourceMetrics { + if !UseProtoPooling.IsEnabled() { + return &ResourceMetrics{} + } + return protoPoolResourceMetrics.Get().(*ResourceMetrics) +} + +func DeleteResourceMetrics(orig *ResourceMetrics, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteResource(&orig.Resource, false) + for i := range orig.ScopeMetrics { + DeleteScopeMetrics(orig.ScopeMetrics[i], true) + } + + for i := range orig.DeprecatedScopeMetrics { + DeleteScopeMetrics(orig.DeprecatedScopeMetrics[i], true) + } + orig.Reset() + if nullable { + protoPoolResourceMetrics.Put(orig) + } +} + +func CopyResourceMetrics(dest, src *ResourceMetrics) *ResourceMetrics { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewResourceMetrics() + } + CopyResource(&dest.Resource, &src.Resource) + + dest.ScopeMetrics = CopyScopeMetricsPtrSlice(dest.ScopeMetrics, src.ScopeMetrics) + + dest.SchemaUrl = src.SchemaUrl + dest.DeprecatedScopeMetrics = CopyScopeMetricsPtrSlice(dest.DeprecatedScopeMetrics, src.DeprecatedScopeMetrics) + + return dest +} + +func CopyResourceMetricsSlice(dest, src []ResourceMetrics) []ResourceMetrics { + var newDest []ResourceMetrics + if cap(dest) < len(src) { + newDest = make([]ResourceMetrics, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResourceMetrics(&dest[i], false) + } + } + for i := range src { + CopyResourceMetrics(&newDest[i], &src[i]) + } + return newDest +} + +func CopyResourceMetricsPtrSlice(dest, src []*ResourceMetrics) []*ResourceMetrics { + var newDest []*ResourceMetrics + if cap(dest) < len(src) { + newDest = make([]*ResourceMetrics, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResourceMetrics() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResourceMetrics(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResourceMetrics() + } + } + for i := range src { + CopyResourceMetrics(newDest[i], src[i]) + } + return newDest +} + +func (orig *ResourceMetrics) Reset() { + *orig = ResourceMetrics{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ResourceMetrics) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("resource") + orig.Resource.MarshalJSON(dest) + if len(orig.ScopeMetrics) > 0 { + dest.WriteObjectField("scopeMetrics") + dest.WriteArrayStart() + orig.ScopeMetrics[0].MarshalJSON(dest) + for i := 1; i < len(orig.ScopeMetrics); i++ { + dest.WriteMore() + orig.ScopeMetrics[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + if len(orig.DeprecatedScopeMetrics) > 0 { + dest.WriteObjectField("deprecatedScopeMetrics") + dest.WriteArrayStart() + orig.DeprecatedScopeMetrics[0].MarshalJSON(dest) + for i := 1; i < len(orig.DeprecatedScopeMetrics); i++ { + dest.WriteMore() + orig.DeprecatedScopeMetrics[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ResourceMetrics) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resource": + + orig.Resource.UnmarshalJSON(iter) + case "scopeMetrics", "scope_metrics": + for iter.ReadArray() { + orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics()) + orig.ScopeMetrics[len(orig.ScopeMetrics)-1].UnmarshalJSON(iter) + } + + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + case "deprecatedScopeMetrics", "deprecated_scope_metrics": + for iter.ReadArray() { + orig.DeprecatedScopeMetrics = append(orig.DeprecatedScopeMetrics, NewScopeMetrics()) + orig.DeprecatedScopeMetrics[len(orig.DeprecatedScopeMetrics)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *ResourceMetrics) SizeProto() int { + var n int + var l int + _ = l + l = orig.Resource.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + for i := range orig.ScopeMetrics { + l = orig.ScopeMetrics[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + for i := range orig.DeprecatedScopeMetrics { + l = orig.DeprecatedScopeMetrics[i].SizeProto() + n += 2 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ResourceMetrics) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.Resource.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + for i := len(orig.ScopeMetrics) - 1; i >= 0; i-- { + l = orig.ScopeMetrics[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + for i := len(orig.DeprecatedScopeMetrics) - 1; i >= 0; i-- { + l = orig.DeprecatedScopeMetrics[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x3e + pos-- + buf[pos] = 0xc2 + } + return len(buf) - pos +} + +func (orig *ResourceMetrics) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Resource.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics()) + err = orig.ScopeMetrics[len(orig.ScopeMetrics)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + + case 1000: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DeprecatedScopeMetrics = append(orig.DeprecatedScopeMetrics, NewScopeMetrics()) + err = orig.DeprecatedScopeMetrics[len(orig.DeprecatedScopeMetrics)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestResourceMetrics() *ResourceMetrics { + orig := NewResourceMetrics() + orig.Resource = *GenTestResource() + orig.ScopeMetrics = []*ScopeMetrics{{}, GenTestScopeMetrics()} + orig.SchemaUrl = "test_schemaurl" + orig.DeprecatedScopeMetrics = []*ScopeMetrics{{}, GenTestScopeMetrics()} + return orig +} + +func GenTestResourceMetricsPtrSlice() []*ResourceMetrics { + orig := make([]*ResourceMetrics, 5) + orig[0] = NewResourceMetrics() + orig[1] = GenTestResourceMetrics() + orig[2] = NewResourceMetrics() + orig[3] = GenTestResourceMetrics() + orig[4] = NewResourceMetrics() + return orig +} + +func GenTestResourceMetricsSlice() []ResourceMetrics { + orig := make([]ResourceMetrics, 5) + orig[1] = *GenTestResourceMetrics() + orig[3] = *GenTestResourceMetrics() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go new file mode 100644 index 00000000000..e1b0910724b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ResourceProfiles is a collection of profiles from a Resource. 
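+// Unlike ResourceLogs, ResourceMetrics and ResourceSpans, this message carries
+// no field-1000 deprecated scope list; only resource (1), scope_profiles (2)
+// and schema_url (3) are handled here.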
+type ResourceProfiles struct { + SchemaUrl string + Resource Resource + ScopeProfiles []*ScopeProfiles +} + +var ( + protoPoolResourceProfiles = sync.Pool{ + New: func() any { + return &ResourceProfiles{} + }, + } +) + +func NewResourceProfiles() *ResourceProfiles { + if !UseProtoPooling.IsEnabled() { + return &ResourceProfiles{} + } + return protoPoolResourceProfiles.Get().(*ResourceProfiles) +} + +func DeleteResourceProfiles(orig *ResourceProfiles, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteResource(&orig.Resource, false) + for i := range orig.ScopeProfiles { + DeleteScopeProfiles(orig.ScopeProfiles[i], true) + } + + orig.Reset() + if nullable { + protoPoolResourceProfiles.Put(orig) + } +} + +func CopyResourceProfiles(dest, src *ResourceProfiles) *ResourceProfiles { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewResourceProfiles() + } + CopyResource(&dest.Resource, &src.Resource) + + dest.ScopeProfiles = CopyScopeProfilesPtrSlice(dest.ScopeProfiles, src.ScopeProfiles) + + dest.SchemaUrl = src.SchemaUrl + + return dest +} + +func CopyResourceProfilesSlice(dest, src []ResourceProfiles) []ResourceProfiles { + var newDest []ResourceProfiles + if cap(dest) < len(src) { + newDest = make([]ResourceProfiles, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResourceProfiles(&dest[i], false) + } + } + for i := range src { + CopyResourceProfiles(&newDest[i], &src[i]) + } + return newDest +} + +func CopyResourceProfilesPtrSlice(dest, src []*ResourceProfiles) []*ResourceProfiles { + var newDest []*ResourceProfiles + if cap(dest) < len(src) { + newDest = make([]*ResourceProfiles, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResourceProfiles() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResourceProfiles(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResourceProfiles() + } + } + for i := range src { + CopyResourceProfiles(newDest[i], src[i]) + } + return newDest +} + +func (orig *ResourceProfiles) Reset() { + *orig = ResourceProfiles{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ResourceProfiles) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("resource") + orig.Resource.MarshalJSON(dest) + if len(orig.ScopeProfiles) > 0 { + dest.WriteObjectField("scopeProfiles") + dest.WriteArrayStart() + orig.ScopeProfiles[0].MarshalJSON(dest) + for i := 1; i < len(orig.ScopeProfiles); i++ { + dest.WriteMore() + orig.ScopeProfiles[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ResourceProfiles) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resource": + + orig.Resource.UnmarshalJSON(iter) + case "scopeProfiles", "scope_profiles": + for iter.ReadArray() { + orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles()) + orig.ScopeProfiles[len(orig.ScopeProfiles)-1].UnmarshalJSON(iter) + } + + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ResourceProfiles) SizeProto() int { + var n int + var l int + _ = l + l = orig.Resource.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + for i := range orig.ScopeProfiles { + l = orig.ScopeProfiles[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ResourceProfiles) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.Resource.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + for i := len(orig.ScopeProfiles) - 1; i >= 0; i-- { + l = orig.ScopeProfiles[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + return len(buf) - pos +} + +func (orig *ResourceProfiles) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
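+ // LEN-delimited fields are decoded in place: ConsumeLen yields the payload
+ // size and the submessage parses the buf[startPos:pos] subslice without
+ // copying; only SchemaUrl is copied out, via the string conversion.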
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Resource.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles()) + err = orig.ScopeProfiles[len(orig.ScopeProfiles)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestResourceProfiles() *ResourceProfiles { + orig := NewResourceProfiles() + orig.Resource = *GenTestResource() + orig.ScopeProfiles = []*ScopeProfiles{{}, GenTestScopeProfiles()} + orig.SchemaUrl = "test_schemaurl" + return orig +} + +func GenTestResourceProfilesPtrSlice() []*ResourceProfiles { + orig := make([]*ResourceProfiles, 5) + orig[0] = NewResourceProfiles() + orig[1] = GenTestResourceProfiles() + orig[2] = NewResourceProfiles() + orig[3] = GenTestResourceProfiles() + orig[4] = NewResourceProfiles() + return orig +} + +func GenTestResourceProfilesSlice() []ResourceProfiles { + orig := make([]ResourceProfiles, 5) + orig[1] = *GenTestResourceProfiles() + orig[3] = *GenTestResourceProfiles() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go new file mode 100644 index 00000000000..fd29e905728 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go @@ -0,0 +1,364 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ResourceSpans is a collection of spans from a Resource. 
+type ResourceSpans struct { + Resource Resource + ScopeSpans []*ScopeSpans + SchemaUrl string + DeprecatedScopeSpans []*ScopeSpans +} + +var ( + protoPoolResourceSpans = sync.Pool{ + New: func() any { + return &ResourceSpans{} + }, + } +) + +func NewResourceSpans() *ResourceSpans { + if !UseProtoPooling.IsEnabled() { + return &ResourceSpans{} + } + return protoPoolResourceSpans.Get().(*ResourceSpans) +} + +func DeleteResourceSpans(orig *ResourceSpans, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteResource(&orig.Resource, false) + for i := range orig.ScopeSpans { + DeleteScopeSpans(orig.ScopeSpans[i], true) + } + + for i := range orig.DeprecatedScopeSpans { + DeleteScopeSpans(orig.DeprecatedScopeSpans[i], true) + } + orig.Reset() + if nullable { + protoPoolResourceSpans.Put(orig) + } +} + +func CopyResourceSpans(dest, src *ResourceSpans) *ResourceSpans { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewResourceSpans() + } + CopyResource(&dest.Resource, &src.Resource) + + dest.ScopeSpans = CopyScopeSpansPtrSlice(dest.ScopeSpans, src.ScopeSpans) + + dest.SchemaUrl = src.SchemaUrl + dest.DeprecatedScopeSpans = CopyScopeSpansPtrSlice(dest.DeprecatedScopeSpans, src.DeprecatedScopeSpans) + + return dest +} + +func CopyResourceSpansSlice(dest, src []ResourceSpans) []ResourceSpans { + var newDest []ResourceSpans + if cap(dest) < len(src) { + newDest = make([]ResourceSpans, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResourceSpans(&dest[i], false) + } + } + for i := range src { + CopyResourceSpans(&newDest[i], &src[i]) + } + return newDest +} + +func CopyResourceSpansPtrSlice(dest, src []*ResourceSpans) []*ResourceSpans { + var newDest []*ResourceSpans + if cap(dest) < len(src) { + newDest = make([]*ResourceSpans, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResourceSpans() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteResourceSpans(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewResourceSpans() + } + } + for i := range src { + CopyResourceSpans(newDest[i], src[i]) + } + return newDest +} + +func (orig *ResourceSpans) Reset() { + *orig = ResourceSpans{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
+func (orig *ResourceSpans) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("resource") + orig.Resource.MarshalJSON(dest) + if len(orig.ScopeSpans) > 0 { + dest.WriteObjectField("scopeSpans") + dest.WriteArrayStart() + orig.ScopeSpans[0].MarshalJSON(dest) + for i := 1; i < len(orig.ScopeSpans); i++ { + dest.WriteMore() + orig.ScopeSpans[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + if len(orig.DeprecatedScopeSpans) > 0 { + dest.WriteObjectField("deprecatedScopeSpans") + dest.WriteArrayStart() + orig.DeprecatedScopeSpans[0].MarshalJSON(dest) + for i := 1; i < len(orig.DeprecatedScopeSpans); i++ { + dest.WriteMore() + orig.DeprecatedScopeSpans[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ResourceSpans) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resource": + + orig.Resource.UnmarshalJSON(iter) + case "scopeSpans", "scope_spans": + for iter.ReadArray() { + orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans()) + orig.ScopeSpans[len(orig.ScopeSpans)-1].UnmarshalJSON(iter) + } + + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + case "deprecatedScopeSpans", "deprecated_scope_spans": + for iter.ReadArray() { + orig.DeprecatedScopeSpans = append(orig.DeprecatedScopeSpans, NewScopeSpans()) + orig.DeprecatedScopeSpans[len(orig.DeprecatedScopeSpans)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *ResourceSpans) SizeProto() int { + var n int + var l int + _ = l + l = orig.Resource.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + for i := range orig.ScopeSpans { + l = orig.ScopeSpans[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + for i := range orig.DeprecatedScopeSpans { + l = orig.DeprecatedScopeSpans[i].SizeProto() + n += 2 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ResourceSpans) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.Resource.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + for i := len(orig.ScopeSpans) - 1; i >= 0; i-- { + l = orig.ScopeSpans[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + for i := len(orig.DeprecatedScopeSpans) - 1; i >= 0; i-- { + l = orig.DeprecatedScopeSpans[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x3e + pos-- + buf[pos] = 0xc2 + } + return len(buf) - pos +} + +func (orig *ResourceSpans) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
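+ // A tag is one varint: (fieldNumber << 3) | wireType. For the
+ // length-delimited Resource field that is (1 << 3) | 2 = 0x0a, the same
+ // byte the backward marshaller above writes before the Resource payload.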
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Resource.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans()) + err = orig.ScopeSpans[len(orig.ScopeSpans)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + + case 1000: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DeprecatedScopeSpans = append(orig.DeprecatedScopeSpans, NewScopeSpans()) + err = orig.DeprecatedScopeSpans[len(orig.DeprecatedScopeSpans)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestResourceSpans() *ResourceSpans { + orig := NewResourceSpans() + orig.Resource = *GenTestResource() + orig.ScopeSpans = []*ScopeSpans{{}, GenTestScopeSpans()} + orig.SchemaUrl = "test_schemaurl" + orig.DeprecatedScopeSpans = []*ScopeSpans{{}, GenTestScopeSpans()} + return orig +} + +func GenTestResourceSpansPtrSlice() []*ResourceSpans { + orig := make([]*ResourceSpans, 5) + orig[0] = NewResourceSpans() + orig[1] = GenTestResourceSpans() + orig[2] = NewResourceSpans() + orig[3] = GenTestResourceSpans() + orig[4] = NewResourceSpans() + return orig +} + +func GenTestResourceSpansSlice() []ResourceSpans { + orig := make([]ResourceSpans, 5) + orig[1] = *GenTestResourceSpans() + orig[3] = *GenTestResourceSpans() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sample.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go similarity index 70% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sample.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go index 902b55aed89..7a67e83f6dc 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sample.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go @@ -11,27 +11,35 @@ import ( "fmt" "sync" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// Sample represents each record value encountered within a profiled program. 
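+//
+// Field numbers on the wire (see the cases in UnmarshalProto below):
+// StackIndex = 1, Values = 2 and AttributeIndices = 3 (both packed,
+// length-delimited), LinkIndex = 4, TimestampsUnixNano = 5.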
+type Sample struct { + Values []int64 + AttributeIndices []int32 + TimestampsUnixNano []uint64 + StackIndex int32 + LinkIndex int32 +} + var ( protoPoolSample = sync.Pool{ New: func() any { - return &otlpprofiles.Sample{} + return &Sample{} }, } ) -func NewOrigSample() *otlpprofiles.Sample { +func NewSample() *Sample { if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.Sample{} + return &Sample{} } - return protoPoolSample.Get().(*otlpprofiles.Sample) + return protoPoolSample.Get().(*Sample) } -func DeleteOrigSample(orig *otlpprofiles.Sample, nullable bool) { +func DeleteSample(orig *Sample, nullable bool) { if orig == nil { return } @@ -47,30 +55,84 @@ func DeleteOrigSample(orig *otlpprofiles.Sample, nullable bool) { } } -func CopyOrigSample(dest, src *otlpprofiles.Sample) { +func CopySample(dest, src *Sample) *Sample { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewSample() } dest.StackIndex = src.StackIndex - dest.Values = CopyOrigInt64Slice(dest.Values, src.Values) - dest.AttributeIndices = CopyOrigInt32Slice(dest.AttributeIndices, src.AttributeIndices) + dest.Values = append(dest.Values[:0], src.Values...) + + dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...) + dest.LinkIndex = src.LinkIndex - dest.TimestampsUnixNano = CopyOrigUint64Slice(dest.TimestampsUnixNano, src.TimestampsUnixNano) + dest.TimestampsUnixNano = append(dest.TimestampsUnixNano[:0], src.TimestampsUnixNano...) + + return dest } -func GenTestOrigSample() *otlpprofiles.Sample { - orig := NewOrigSample() - orig.StackIndex = int32(13) - orig.Values = GenerateOrigTestInt64Slice() - orig.AttributeIndices = GenerateOrigTestInt32Slice() - orig.LinkIndex = int32(13) - orig.TimestampsUnixNano = GenerateOrigTestUint64Slice() - return orig +func CopySampleSlice(dest, src []Sample) []Sample { + var newDest []Sample + if cap(dest) < len(src) { + newDest = make([]Sample, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSample(&dest[i], false) + } + } + for i := range src { + CopySample(&newDest[i], &src[i]) + } + return newDest +} + +func CopySamplePtrSlice(dest, src []*Sample) []*Sample { + var newDest []*Sample + if cap(dest) < len(src) { + newDest = make([]*Sample, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSample() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSample(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSample() + } + } + for i := range src { + CopySample(newDest[i], src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigSample(orig *otlpprofiles.Sample, dest *json.Stream) { +func (orig *Sample) Reset() { + *orig = Sample{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. 
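+// Per the proto3 JSON mapping, zero values are omitted: scalar fields are
+// written only when non-zero and repeated fields only when non-empty,
+// matching the != int32(0) and len(...) > 0 guards below.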
+func (orig *Sample) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if orig.StackIndex != int32(0) { dest.WriteObjectField("stackIndex") @@ -86,6 +148,7 @@ func MarshalJSONOrigSample(orig *otlpprofiles.Sample, dest *json.Stream) { } dest.WriteArrayEnd() } + if len(orig.AttributeIndices) > 0 { dest.WriteObjectField("attributeIndices") dest.WriteArrayStart() @@ -96,6 +159,7 @@ func MarshalJSONOrigSample(orig *otlpprofiles.Sample, dest *json.Stream) { } dest.WriteArrayEnd() } + if orig.LinkIndex != int32(0) { dest.WriteObjectField("linkIndex") dest.WriteInt32(orig.LinkIndex) @@ -110,11 +174,12 @@ func MarshalJSONOrigSample(orig *otlpprofiles.Sample, dest *json.Stream) { } dest.WriteArrayEnd() } + dest.WriteObjectEnd() } -// UnmarshalJSONOrigSample unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigSample(orig *otlpprofiles.Sample, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Sample) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "stackIndex", "stack_index": @@ -142,13 +207,14 @@ func UnmarshalJSONOrigSample(orig *otlpprofiles.Sample, iter *json.Iterator) { } } -func SizeProtoOrigSample(orig *otlpprofiles.Sample) int { +func (orig *Sample) SizeProto() int { var n int var l int _ = l - if orig.StackIndex != 0 { + if orig.StackIndex != int32(0) { n += 1 + proto.Sov(uint64(orig.StackIndex)) } + if len(orig.Values) > 0 { l = 0 for _, e := range orig.Values { @@ -156,6 +222,7 @@ func SizeProtoOrigSample(orig *otlpprofiles.Sample) int { } n += 1 + proto.Sov(uint64(l)) + l } + if len(orig.AttributeIndices) > 0 { l = 0 for _, e := range orig.AttributeIndices { @@ -163,7 +230,7 @@ func SizeProtoOrigSample(orig *otlpprofiles.Sample) int { } n += 1 + proto.Sov(uint64(l)) + l } - if orig.LinkIndex != 0 { + if orig.LinkIndex != int32(0) { n += 1 + proto.Sov(uint64(orig.LinkIndex)) } l = len(orig.TimestampsUnixNano) @@ -174,11 +241,11 @@ func SizeProtoOrigSample(orig *otlpprofiles.Sample) int { return n } -func MarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) int { +func (orig *Sample) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - if orig.StackIndex != 0 { + if orig.StackIndex != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.StackIndex)) pos-- buf[pos] = 0x8 @@ -203,7 +270,7 @@ func MarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) int { pos-- buf[pos] = 0x1a } - if orig.LinkIndex != 0 { + if orig.LinkIndex != int32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.LinkIndex)) pos-- buf[pos] = 0x20 @@ -221,7 +288,7 @@ func MarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) int { return len(buf) - pos } -func UnmarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) error { +func (orig *Sample) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -245,7 +312,6 @@ func UnmarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) error { if err != nil { return err } - orig.StackIndex = int32(num) case 2: switch wireType { @@ -317,7 +383,6 @@ func UnmarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) error { if err != nil { return err } - orig.LinkIndex = int32(num) case 5: switch wireType { @@ -360,3 +425,30 @@ func UnmarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) error { } return nil } + +func GenTestSample() *Sample { + orig := NewSample() + 
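// Non-zero sentinel values (13) force every optional field onto its
+ // encode path; the slices mix a zero and a non-zero element to cover
+ // both cases in round-trip tests.
+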
orig.StackIndex = int32(13) + orig.Values = []int64{int64(0), int64(13)} + orig.AttributeIndices = []int32{int32(0), int32(13)} + orig.LinkIndex = int32(13) + orig.TimestampsUnixNano = []uint64{uint64(0), uint64(13)} + return orig +} + +func GenTestSamplePtrSlice() []*Sample { + orig := make([]*Sample, 5) + orig[0] = NewSample() + orig[1] = GenTestSample() + orig[2] = NewSample() + orig[3] = GenTestSample() + orig[4] = NewSample() + return orig +} + +func GenTestSampleSlice() []Sample { + orig := make([]Sample, 5) + orig[1] = *GenTestSample() + orig[3] = *GenTestSample() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go new file mode 100644 index 00000000000..606af0ffd39 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ScopeLogs is a collection of logs from a LibraryInstrumentation. +type ScopeLogs struct { + SchemaUrl string + LogRecords []*LogRecord + Scope InstrumentationScope +} + +var ( + protoPoolScopeLogs = sync.Pool{ + New: func() any { + return &ScopeLogs{} + }, + } +) + +func NewScopeLogs() *ScopeLogs { + if !UseProtoPooling.IsEnabled() { + return &ScopeLogs{} + } + return protoPoolScopeLogs.Get().(*ScopeLogs) +} + +func DeleteScopeLogs(orig *ScopeLogs, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteInstrumentationScope(&orig.Scope, false) + for i := range orig.LogRecords { + DeleteLogRecord(orig.LogRecords[i], true) + } + + orig.Reset() + if nullable { + protoPoolScopeLogs.Put(orig) + } +} + +func CopyScopeLogs(dest, src *ScopeLogs) *ScopeLogs { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewScopeLogs() + } + CopyInstrumentationScope(&dest.Scope, &src.Scope) + + dest.LogRecords = CopyLogRecordPtrSlice(dest.LogRecords, src.LogRecords) + + dest.SchemaUrl = src.SchemaUrl + + return dest +} + +func CopyScopeLogsSlice(dest, src []ScopeLogs) []ScopeLogs { + var newDest []ScopeLogs + if cap(dest) < len(src) { + newDest = make([]ScopeLogs, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteScopeLogs(&dest[i], false) + } + } + for i := range src { + CopyScopeLogs(&newDest[i], &src[i]) + } + return newDest +} + +func CopyScopeLogsPtrSlice(dest, src []*ScopeLogs) []*ScopeLogs { + var newDest []*ScopeLogs + if cap(dest) < len(src) { + newDest = make([]*ScopeLogs, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewScopeLogs() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
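+ // Delete with nullable=true returns the element to the pool when
+ // pooling is enabled; nil-ing the slot afterwards drops the stale
+ // reference so the pooled object is no longer reachable from here.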
+ for i := len(src); i < len(dest); i++ { + DeleteScopeLogs(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewScopeLogs() + } + } + for i := range src { + CopyScopeLogs(newDest[i], src[i]) + } + return newDest +} + +func (orig *ScopeLogs) Reset() { + *orig = ScopeLogs{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ScopeLogs) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("scope") + orig.Scope.MarshalJSON(dest) + if len(orig.LogRecords) > 0 { + dest.WriteObjectField("logRecords") + dest.WriteArrayStart() + orig.LogRecords[0].MarshalJSON(dest) + for i := 1; i < len(orig.LogRecords); i++ { + dest.WriteMore() + orig.LogRecords[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ScopeLogs) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "scope": + + orig.Scope.UnmarshalJSON(iter) + case "logRecords", "log_records": + for iter.ReadArray() { + orig.LogRecords = append(orig.LogRecords, NewLogRecord()) + orig.LogRecords[len(orig.LogRecords)-1].UnmarshalJSON(iter) + } + + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ScopeLogs) SizeProto() int { + var n int + var l int + _ = l + l = orig.Scope.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + for i := range orig.LogRecords { + l = orig.LogRecords[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ScopeLogs) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.Scope.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + for i := len(orig.LogRecords) - 1; i >= 0; i-- { + l = orig.LogRecords[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + return len(buf) - pos +} + +func (orig *ScopeLogs) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
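+ // ConsumeLen advances pos past the length-prefixed payload, so each
+ // nested LogRecord is decoded from the sub-slice buf[startPos:pos]
+ // without any copying.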
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Scope.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.LogRecords = append(orig.LogRecords, NewLogRecord()) + err = orig.LogRecords[len(orig.LogRecords)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestScopeLogs() *ScopeLogs { + orig := NewScopeLogs() + orig.Scope = *GenTestInstrumentationScope() + orig.LogRecords = []*LogRecord{{}, GenTestLogRecord()} + orig.SchemaUrl = "test_schemaurl" + return orig +} + +func GenTestScopeLogsPtrSlice() []*ScopeLogs { + orig := make([]*ScopeLogs, 5) + orig[0] = NewScopeLogs() + orig[1] = GenTestScopeLogs() + orig[2] = NewScopeLogs() + orig[3] = GenTestScopeLogs() + orig[4] = NewScopeLogs() + return orig +} + +func GenTestScopeLogsSlice() []ScopeLogs { + orig := make([]ScopeLogs, 5) + orig[1] = *GenTestScopeLogs() + orig[3] = *GenTestScopeLogs() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go new file mode 100644 index 00000000000..5c7a9d1d556 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ScopeMetrics is a collection of metrics from a LibraryInstrumentation. 
+type ScopeMetrics struct { + SchemaUrl string + Metrics []*Metric + Scope InstrumentationScope +} + +var ( + protoPoolScopeMetrics = sync.Pool{ + New: func() any { + return &ScopeMetrics{} + }, + } +) + +func NewScopeMetrics() *ScopeMetrics { + if !UseProtoPooling.IsEnabled() { + return &ScopeMetrics{} + } + return protoPoolScopeMetrics.Get().(*ScopeMetrics) +} + +func DeleteScopeMetrics(orig *ScopeMetrics, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteInstrumentationScope(&orig.Scope, false) + for i := range orig.Metrics { + DeleteMetric(orig.Metrics[i], true) + } + + orig.Reset() + if nullable { + protoPoolScopeMetrics.Put(orig) + } +} + +func CopyScopeMetrics(dest, src *ScopeMetrics) *ScopeMetrics { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewScopeMetrics() + } + CopyInstrumentationScope(&dest.Scope, &src.Scope) + + dest.Metrics = CopyMetricPtrSlice(dest.Metrics, src.Metrics) + + dest.SchemaUrl = src.SchemaUrl + + return dest +} + +func CopyScopeMetricsSlice(dest, src []ScopeMetrics) []ScopeMetrics { + var newDest []ScopeMetrics + if cap(dest) < len(src) { + newDest = make([]ScopeMetrics, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteScopeMetrics(&dest[i], false) + } + } + for i := range src { + CopyScopeMetrics(&newDest[i], &src[i]) + } + return newDest +} + +func CopyScopeMetricsPtrSlice(dest, src []*ScopeMetrics) []*ScopeMetrics { + var newDest []*ScopeMetrics + if cap(dest) < len(src) { + newDest = make([]*ScopeMetrics, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewScopeMetrics() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteScopeMetrics(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewScopeMetrics() + } + } + for i := range src { + CopyScopeMetrics(newDest[i], src[i]) + } + return newDest +} + +func (orig *ScopeMetrics) Reset() { + *orig = ScopeMetrics{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ScopeMetrics) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("scope") + orig.Scope.MarshalJSON(dest) + if len(orig.Metrics) > 0 { + dest.WriteObjectField("metrics") + dest.WriteArrayStart() + orig.Metrics[0].MarshalJSON(dest) + for i := 1; i < len(orig.Metrics); i++ { + dest.WriteMore() + orig.Metrics[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
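+// Both the lowerCamelCase JSON names and the original snake_case proto
+// names are accepted (hence paired cases like "schemaUrl", "schema_url");
+// unknown keys are skipped.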
+func (orig *ScopeMetrics) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "scope": + + orig.Scope.UnmarshalJSON(iter) + case "metrics": + for iter.ReadArray() { + orig.Metrics = append(orig.Metrics, NewMetric()) + orig.Metrics[len(orig.Metrics)-1].UnmarshalJSON(iter) + } + + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ScopeMetrics) SizeProto() int { + var n int + var l int + _ = l + l = orig.Scope.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + for i := range orig.Metrics { + l = orig.Metrics[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ScopeMetrics) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.Scope.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + for i := len(orig.Metrics) - 1; i >= 0; i-- { + l = orig.Metrics[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + return len(buf) - pos +} + +func (orig *ScopeMetrics) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Scope.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Metrics = append(orig.Metrics, NewMetric()) + err = orig.Metrics[len(orig.Metrics)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestScopeMetrics() *ScopeMetrics { + orig := NewScopeMetrics() + orig.Scope = *GenTestInstrumentationScope() + orig.Metrics = []*Metric{{}, GenTestMetric()} + orig.SchemaUrl = "test_schemaurl" + return orig +} + +func GenTestScopeMetricsPtrSlice() []*ScopeMetrics { + orig := make([]*ScopeMetrics, 5) + orig[0] = NewScopeMetrics() + orig[1] = GenTestScopeMetrics() + orig[2] = NewScopeMetrics() + orig[3] = GenTestScopeMetrics() + orig[4] = NewScopeMetrics() + return orig +} + +func GenTestScopeMetricsSlice() []ScopeMetrics { + orig := make([]ScopeMetrics, 5) + orig[1] = *GenTestScopeMetrics() + orig[3] = 
*GenTestScopeMetrics() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go new file mode 100644 index 00000000000..b88f3b1e1e2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ScopeProfiles is a collection of profiles from a LibraryInstrumentation. +type ScopeProfiles struct { + SchemaUrl string + Profiles []*Profile + Scope InstrumentationScope +} + +var ( + protoPoolScopeProfiles = sync.Pool{ + New: func() any { + return &ScopeProfiles{} + }, + } +) + +func NewScopeProfiles() *ScopeProfiles { + if !UseProtoPooling.IsEnabled() { + return &ScopeProfiles{} + } + return protoPoolScopeProfiles.Get().(*ScopeProfiles) +} + +func DeleteScopeProfiles(orig *ScopeProfiles, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteInstrumentationScope(&orig.Scope, false) + for i := range orig.Profiles { + DeleteProfile(orig.Profiles[i], true) + } + + orig.Reset() + if nullable { + protoPoolScopeProfiles.Put(orig) + } +} + +func CopyScopeProfiles(dest, src *ScopeProfiles) *ScopeProfiles { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewScopeProfiles() + } + CopyInstrumentationScope(&dest.Scope, &src.Scope) + + dest.Profiles = CopyProfilePtrSlice(dest.Profiles, src.Profiles) + + dest.SchemaUrl = src.SchemaUrl + + return dest +} + +func CopyScopeProfilesSlice(dest, src []ScopeProfiles) []ScopeProfiles { + var newDest []ScopeProfiles + if cap(dest) < len(src) { + newDest = make([]ScopeProfiles, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteScopeProfiles(&dest[i], false) + } + } + for i := range src { + CopyScopeProfiles(&newDest[i], &src[i]) + } + return newDest +} + +func CopyScopeProfilesPtrSlice(dest, src []*ScopeProfiles) []*ScopeProfiles { + var newDest []*ScopeProfiles + if cap(dest) < len(src) { + newDest = make([]*ScopeProfiles, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewScopeProfiles() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteScopeProfiles(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). 
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewScopeProfiles() + } + } + for i := range src { + CopyScopeProfiles(newDest[i], src[i]) + } + return newDest +} + +func (orig *ScopeProfiles) Reset() { + *orig = ScopeProfiles{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ScopeProfiles) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("scope") + orig.Scope.MarshalJSON(dest) + if len(orig.Profiles) > 0 { + dest.WriteObjectField("profiles") + dest.WriteArrayStart() + orig.Profiles[0].MarshalJSON(dest) + for i := 1; i < len(orig.Profiles); i++ { + dest.WriteMore() + orig.Profiles[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *ScopeProfiles) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "scope": + + orig.Scope.UnmarshalJSON(iter) + case "profiles": + for iter.ReadArray() { + orig.Profiles = append(orig.Profiles, NewProfile()) + orig.Profiles[len(orig.Profiles)-1].UnmarshalJSON(iter) + } + + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ScopeProfiles) SizeProto() int { + var n int + var l int + _ = l + l = orig.Scope.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + for i := range orig.Profiles { + l = orig.Profiles[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ScopeProfiles) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.Scope.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + for i := len(orig.Profiles) - 1; i >= 0; i-- { + l = orig.Profiles[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + return len(buf) - pos +} + +func (orig *ScopeProfiles) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Scope.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Profiles = append(orig.Profiles, NewProfile()) + err = orig.Profiles[len(orig.Profiles)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestScopeProfiles() *ScopeProfiles { + orig := NewScopeProfiles() + orig.Scope = *GenTestInstrumentationScope() + orig.Profiles = []*Profile{{}, GenTestProfile()} + orig.SchemaUrl = "test_schemaurl" + return orig +} + +func GenTestScopeProfilesPtrSlice() []*ScopeProfiles { + orig := make([]*ScopeProfiles, 5) + orig[0] = NewScopeProfiles() + orig[1] = GenTestScopeProfiles() + orig[2] = NewScopeProfiles() + orig[3] = GenTestScopeProfiles() + orig[4] = NewScopeProfiles() + return orig +} + +func GenTestScopeProfilesSlice() []ScopeProfiles { + orig := make([]ScopeProfiles, 5) + orig[1] = *GenTestScopeProfiles() + orig[3] = *GenTestScopeProfiles() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go new file mode 100644 index 00000000000..6a1966527ab --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ScopeSpans is a collection of spans from a LibraryInstrumentation. 
+type ScopeSpans struct { + SchemaUrl string + Spans []*Span + Scope InstrumentationScope +} + +var ( + protoPoolScopeSpans = sync.Pool{ + New: func() any { + return &ScopeSpans{} + }, + } +) + +func NewScopeSpans() *ScopeSpans { + if !UseProtoPooling.IsEnabled() { + return &ScopeSpans{} + } + return protoPoolScopeSpans.Get().(*ScopeSpans) +} + +func DeleteScopeSpans(orig *ScopeSpans, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteInstrumentationScope(&orig.Scope, false) + for i := range orig.Spans { + DeleteSpan(orig.Spans[i], true) + } + + orig.Reset() + if nullable { + protoPoolScopeSpans.Put(orig) + } +} + +func CopyScopeSpans(dest, src *ScopeSpans) *ScopeSpans { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewScopeSpans() + } + CopyInstrumentationScope(&dest.Scope, &src.Scope) + + dest.Spans = CopySpanPtrSlice(dest.Spans, src.Spans) + + dest.SchemaUrl = src.SchemaUrl + + return dest +} + +func CopyScopeSpansSlice(dest, src []ScopeSpans) []ScopeSpans { + var newDest []ScopeSpans + if cap(dest) < len(src) { + newDest = make([]ScopeSpans, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteScopeSpans(&dest[i], false) + } + } + for i := range src { + CopyScopeSpans(&newDest[i], &src[i]) + } + return newDest +} + +func CopyScopeSpansPtrSlice(dest, src []*ScopeSpans) []*ScopeSpans { + var newDest []*ScopeSpans + if cap(dest) < len(src) { + newDest = make([]*ScopeSpans, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewScopeSpans() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteScopeSpans(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewScopeSpans() + } + } + for i := range src { + CopyScopeSpans(newDest[i], src[i]) + } + return newDest +} + +func (orig *ScopeSpans) Reset() { + *orig = ScopeSpans{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *ScopeSpans) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + dest.WriteObjectField("scope") + orig.Scope.MarshalJSON(dest) + if len(orig.Spans) > 0 { + dest.WriteObjectField("spans") + dest.WriteArrayStart() + orig.Spans[0].MarshalJSON(dest) + for i := 1; i < len(orig.Spans); i++ { + dest.WriteMore() + orig.Spans[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + if orig.SchemaUrl != "" { + dest.WriteObjectField("schemaUrl") + dest.WriteString(orig.SchemaUrl) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
+func (orig *ScopeSpans) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "scope": + + orig.Scope.UnmarshalJSON(iter) + case "spans": + for iter.ReadArray() { + orig.Spans = append(orig.Spans, NewSpan()) + orig.Spans[len(orig.Spans)-1].UnmarshalJSON(iter) + } + + case "schemaUrl", "schema_url": + orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *ScopeSpans) SizeProto() int { + var n int + var l int + _ = l + l = orig.Scope.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + for i := range orig.Spans { + l = orig.Spans[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.SchemaUrl) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *ScopeSpans) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.Scope.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + for i := len(orig.Spans) - 1; i >= 0; i-- { + l = orig.Spans[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = len(orig.SchemaUrl) + if l > 0 { + pos -= l + copy(buf[pos:], orig.SchemaUrl) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + return len(buf) - pos +} + +func (orig *ScopeSpans) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.Scope.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Spans = append(orig.Spans, NewSpan()) + err = orig.Spans[len(orig.Spans)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.SchemaUrl = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestScopeSpans() *ScopeSpans { + orig := NewScopeSpans() + orig.Scope = *GenTestInstrumentationScope() + orig.Spans = []*Span{{}, GenTestSpan()} + orig.SchemaUrl = "test_schemaurl" + return orig +} + +func GenTestScopeSpansPtrSlice() []*ScopeSpans { + orig := make([]*ScopeSpans, 5) + orig[0] = NewScopeSpans() + orig[1] = GenTestScopeSpans() + orig[2] = NewScopeSpans() + orig[3] = GenTestScopeSpans() + orig[4] = NewScopeSpans() + return orig +} + +func GenTestScopeSpansSlice() []ScopeSpans { + orig := make([]ScopeSpans, 5) + orig[1] = *GenTestScopeSpans() + orig[3] = *GenTestScopeSpans() + return orig +} diff --git 
a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go similarity index 63% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go index d53678b0b2e..4df8e982e90 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go @@ -11,29 +11,47 @@ import ( "fmt" "sync" - "go.opentelemetry.io/collector/pdata/internal/data" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// Span represents a single operation within a trace. +// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto +type Span struct { + TraceState string + Name string + Attributes []KeyValue + Events []*SpanEvent + Links []*SpanLink + Status Status + StartTimeUnixNano uint64 + EndTimeUnixNano uint64 + Flags uint32 + Kind SpanKind + DroppedAttributesCount uint32 + DroppedEventsCount uint32 + DroppedLinksCount uint32 + TraceId TraceID + SpanId SpanID + ParentSpanId SpanID +} + var ( protoPoolSpan = sync.Pool{ New: func() any { - return &otlptrace.Span{} + return &Span{} }, } ) -func NewOrigSpan() *otlptrace.Span { +func NewSpan() *Span { if !UseProtoPooling.IsEnabled() { - return &otlptrace.Span{} + return &Span{} } - return protoPoolSpan.Get().(*otlptrace.Span) + return protoPoolSpan.Get().(*Span) } -func DeleteOrigSpan(orig *otlptrace.Span, nullable bool) { +func DeleteSpan(orig *Span, nullable bool) { if orig == nil { return } @@ -42,89 +60,139 @@ func DeleteOrigSpan(orig *otlptrace.Span, nullable bool) { orig.Reset() return } + DeleteTraceID(&orig.TraceId, false) + DeleteSpanID(&orig.SpanId, false) + + DeleteSpanID(&orig.ParentSpanId, false) - DeleteOrigTraceID(&orig.TraceId, false) - DeleteOrigSpanID(&orig.SpanId, false) - DeleteOrigSpanID(&orig.ParentSpanId, false) for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) + DeleteKeyValue(&orig.Attributes[i], false) } + for i := range orig.Events { - DeleteOrigSpan_Event(orig.Events[i], true) + DeleteSpanEvent(orig.Events[i], true) } + for i := range orig.Links { - DeleteOrigSpan_Link(orig.Links[i], true) + DeleteSpanLink(orig.Links[i], true) } - DeleteOrigStatus(&orig.Status, false) + DeleteStatus(&orig.Status, false) orig.Reset() if nullable { protoPoolSpan.Put(orig) } } -func CopyOrigSpan(dest, src *otlptrace.Span) { +func CopySpan(dest, src *Span) *Span { // If copying to same object, just return. 
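+ // Pointer equality makes the deep copy a no-op; without the guard a
+ // self-copy would still walk every nested slice below to no effect.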
if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewSpan() } - dest.TraceId = src.TraceId - dest.SpanId = src.SpanId - CopyOrigTraceState(&dest.TraceState, &src.TraceState) - dest.ParentSpanId = src.ParentSpanId + CopyTraceID(&dest.TraceId, &src.TraceId) + + CopySpanID(&dest.SpanId, &src.SpanId) + + dest.TraceState = src.TraceState + CopySpanID(&dest.ParentSpanId, &src.ParentSpanId) + dest.Flags = src.Flags dest.Name = src.Name dest.Kind = src.Kind dest.StartTimeUnixNano = src.StartTimeUnixNano dest.EndTimeUnixNano = src.EndTimeUnixNano - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + dest.DroppedAttributesCount = src.DroppedAttributesCount - dest.Events = CopyOrigSpan_EventSlice(dest.Events, src.Events) + dest.Events = CopySpanEventPtrSlice(dest.Events, src.Events) + dest.DroppedEventsCount = src.DroppedEventsCount - dest.Links = CopyOrigSpan_LinkSlice(dest.Links, src.Links) + dest.Links = CopySpanLinkPtrSlice(dest.Links, src.Links) + dest.DroppedLinksCount = src.DroppedLinksCount - CopyOrigStatus(&dest.Status, &src.Status) + CopyStatus(&dest.Status, &src.Status) + + return dest } -func GenTestOrigSpan() *otlptrace.Span { - orig := NewOrigSpan() - orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) - orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) - orig.TraceState = *GenTestOrigTraceState() - orig.ParentSpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) - orig.Flags = uint32(13) - orig.Name = "test_name" - orig.Kind = otlptrace.Span_SpanKind(3) - orig.StartTimeUnixNano = 1234567890 - orig.EndTimeUnixNano = 1234567890 - orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.DroppedAttributesCount = uint32(13) - orig.Events = GenerateOrigTestSpan_EventSlice() - orig.DroppedEventsCount = uint32(13) - orig.Links = GenerateOrigTestSpan_LinkSlice() - orig.DroppedLinksCount = uint32(13) - orig.Status = *GenTestOrigStatus() - return orig +func CopySpanSlice(dest, src []Span) []Span { + var newDest []Span + if cap(dest) < len(src) { + newDest = make([]Span, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSpan(&dest[i], false) + } + } + for i := range src { + CopySpan(&newDest[i], &src[i]) + } + return newDest +} + +func CopySpanPtrSlice(dest, src []*Span) []*Span { + var newDest []*Span + if cap(dest) < len(src) { + newDest = make([]*Span, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSpan() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSpan(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSpan() + } + } + for i := range src { + CopySpan(newDest[i], src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
-func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) { +func (orig *Span) Reset() { + *orig = Span{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Span) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() - if orig.TraceId != data.TraceID([16]byte{}) { + if !orig.TraceId.IsEmpty() { dest.WriteObjectField("traceId") - MarshalJSONOrigTraceID(&orig.TraceId, dest) + orig.TraceId.MarshalJSON(dest) } - if orig.SpanId != data.SpanID([8]byte{}) { + if !orig.SpanId.IsEmpty() { dest.WriteObjectField("spanId") - MarshalJSONOrigSpanID(&orig.SpanId, dest) + orig.SpanId.MarshalJSON(dest) } if orig.TraceState != "" { dest.WriteObjectField("traceState") dest.WriteString(orig.TraceState) } - if orig.ParentSpanId != data.SpanID([8]byte{}) { + if !orig.ParentSpanId.IsEmpty() { dest.WriteObjectField("parentSpanId") - MarshalJSONOrigSpanID(&orig.ParentSpanId, dest) + orig.ParentSpanId.MarshalJSON(dest) } if orig.Flags != uint32(0) { dest.WriteObjectField("flags") @@ -150,10 +218,10 @@ func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) { if len(orig.Attributes) > 0 { dest.WriteObjectField("attributes") dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Attributes[0], dest) + orig.Attributes[0].MarshalJSON(dest) for i := 1; i < len(orig.Attributes); i++ { dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Attributes[i], dest) + orig.Attributes[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -164,10 +232,10 @@ func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) { if len(orig.Events) > 0 { dest.WriteObjectField("events") dest.WriteArrayStart() - MarshalJSONOrigSpan_Event(orig.Events[0], dest) + orig.Events[0].MarshalJSON(dest) for i := 1; i < len(orig.Events); i++ { dest.WriteMore() - MarshalJSONOrigSpan_Event(orig.Events[i], dest) + orig.Events[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -178,10 +246,10 @@ func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) { if len(orig.Links) > 0 { dest.WriteObjectField("links") dest.WriteArrayStart() - MarshalJSONOrigSpan_Link(orig.Links[0], dest) + orig.Links[0].MarshalJSON(dest) for i := 1; i < len(orig.Links); i++ { dest.WriteMore() - MarshalJSONOrigSpan_Link(orig.Links[i], dest) + orig.Links[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -190,132 +258,137 @@ func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) { dest.WriteUint32(orig.DroppedLinksCount) } dest.WriteObjectField("status") - MarshalJSONOrigStatus(&orig.Status, dest) + orig.Status.MarshalJSON(dest) dest.WriteObjectEnd() } -// UnmarshalJSONOrigSpan unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigSpan(orig *otlptrace.Span, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
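+// Kind is read through ReadEnumValue with the SpanKind_value lookup table,
+// so the JSON form may carry either the enum's name or its integer value,
+// as proto3 JSON allows.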
+func (orig *Span) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "traceId", "trace_id": - UnmarshalJSONOrigTraceID(&orig.TraceId, iter) + + orig.TraceId.UnmarshalJSON(iter) case "spanId", "span_id": - UnmarshalJSONOrigSpanID(&orig.SpanId, iter) + + orig.SpanId.UnmarshalJSON(iter) case "traceState", "trace_state": orig.TraceState = iter.ReadString() case "parentSpanId", "parent_span_id": - UnmarshalJSONOrigSpanID(&orig.ParentSpanId, iter) + + orig.ParentSpanId.UnmarshalJSON(iter) case "flags": orig.Flags = iter.ReadUint32() case "name": orig.Name = iter.ReadString() case "kind": - orig.Kind = otlptrace.Span_SpanKind(iter.ReadEnumValue(otlptrace.Span_SpanKind_value)) + orig.Kind = SpanKind(iter.ReadEnumValue(SpanKind_value)) case "startTimeUnixNano", "start_time_unix_nano": orig.StartTimeUnixNano = iter.ReadUint64() case "endTimeUnixNano", "end_time_unix_nano": orig.EndTimeUnixNano = iter.ReadUint64() case "attributes": for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) } case "droppedAttributesCount", "dropped_attributes_count": orig.DroppedAttributesCount = iter.ReadUint32() case "events": for iter.ReadArray() { - orig.Events = append(orig.Events, NewOrigSpan_Event()) - UnmarshalJSONOrigSpan_Event(orig.Events[len(orig.Events)-1], iter) + orig.Events = append(orig.Events, NewSpanEvent()) + orig.Events[len(orig.Events)-1].UnmarshalJSON(iter) } case "droppedEventsCount", "dropped_events_count": orig.DroppedEventsCount = iter.ReadUint32() case "links": for iter.ReadArray() { - orig.Links = append(orig.Links, NewOrigSpan_Link()) - UnmarshalJSONOrigSpan_Link(orig.Links[len(orig.Links)-1], iter) + orig.Links = append(orig.Links, NewSpanLink()) + orig.Links[len(orig.Links)-1].UnmarshalJSON(iter) } case "droppedLinksCount", "dropped_links_count": orig.DroppedLinksCount = iter.ReadUint32() case "status": - UnmarshalJSONOrigStatus(&orig.Status, iter) + + orig.Status.UnmarshalJSON(iter) default: iter.Skip() } } } -func SizeProtoOrigSpan(orig *otlptrace.Span) int { +func (orig *Span) SizeProto() int { var n int var l int _ = l - l = SizeProtoOrigTraceID(&orig.TraceId) + l = orig.TraceId.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - l = SizeProtoOrigSpanID(&orig.SpanId) + l = orig.SpanId.SizeProto() n += 1 + proto.Sov(uint64(l)) + l + l = len(orig.TraceState) if l > 0 { n += 1 + proto.Sov(uint64(l)) + l } - l = SizeProtoOrigSpanID(&orig.ParentSpanId) + l = orig.ParentSpanId.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - if orig.Flags != 0 { + if orig.Flags != uint32(0) { n += 6 } + l = len(orig.Name) if l > 0 { n += 1 + proto.Sov(uint64(l)) + l } - if orig.Kind != 0 { + if orig.Kind != SpanKind(0) { n += 1 + proto.Sov(uint64(orig.Kind)) } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { n += 9 } - if orig.EndTimeUnixNano != 0 { + if orig.EndTimeUnixNano != uint64(0) { n += 9 } for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) + l = orig.Attributes[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount)) } for i := range orig.Events { - l = SizeProtoOrigSpan_Event(orig.Events[i]) + l = 
orig.Events[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.DroppedEventsCount != 0 { + if orig.DroppedEventsCount != uint32(0) { n += 1 + proto.Sov(uint64(orig.DroppedEventsCount)) } for i := range orig.Links { - l = SizeProtoOrigSpan_Link(orig.Links[i]) + l = orig.Links[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.DroppedLinksCount != 0 { + if orig.DroppedLinksCount != uint32(0) { n += 1 + proto.Sov(uint64(orig.DroppedLinksCount)) } - l = SizeProtoOrigStatus(&orig.Status) + l = orig.Status.SizeProto() n += 1 + proto.Sov(uint64(l)) + l return n } -func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int { +func (orig *Span) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - - l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos]) + l = orig.TraceId.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0xa - l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos]) + l = orig.SpanId.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- @@ -329,14 +402,13 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int { pos-- buf[pos] = 0x1a } - - l = MarshalProtoOrigSpanID(&orig.ParentSpanId, buf[:pos]) + l = orig.ParentSpanId.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x22 - if orig.Flags != 0 { + if orig.Flags != uint32(0) { pos -= 4 binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags)) pos-- @@ -352,61 +424,60 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int { pos-- buf[pos] = 0x2a } - if orig.Kind != 0 { + if orig.Kind != SpanKind(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Kind)) pos-- buf[pos] = 0x30 } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano)) pos-- buf[pos] = 0x39 } - if orig.EndTimeUnixNano != 0 { + if orig.EndTimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.EndTimeUnixNano)) pos-- buf[pos] = 0x41 } for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) + l = orig.Attributes[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x4a } - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount)) pos-- buf[pos] = 0x50 } for i := len(orig.Events) - 1; i >= 0; i-- { - l = MarshalProtoOrigSpan_Event(orig.Events[i], buf[:pos]) + l = orig.Events[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x5a } - if orig.DroppedEventsCount != 0 { + if orig.DroppedEventsCount != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedEventsCount)) pos-- buf[pos] = 0x60 } for i := len(orig.Links) - 1; i >= 0; i-- { - l = MarshalProtoOrigSpan_Link(orig.Links[i], buf[:pos]) + l = orig.Links[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x6a } - if orig.DroppedLinksCount != 0 { + if orig.DroppedLinksCount != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedLinksCount)) pos-- buf[pos] = 0x70 } - - l = MarshalProtoOrigStatus(&orig.Status, buf[:pos]) + l = orig.Status.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- @@ -415,7 +486,7 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf 
[]byte) int { return len(buf) - pos } -func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { +func (orig *Span) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -441,7 +512,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos]) + err = orig.TraceId.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -457,7 +528,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos]) + err = orig.SpanId.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -485,7 +556,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigSpanID(&orig.ParentSpanId, buf[startPos:pos]) + err = orig.ParentSpanId.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -523,8 +594,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { if err != nil { return err } - - orig.Kind = otlptrace.Span_SpanKind(num) + orig.Kind = SpanKind(num) case 7: if wireType != proto.WireTypeI64 { @@ -560,8 +630,8 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { return err } startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -575,7 +645,6 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { if err != nil { return err } - orig.DroppedAttributesCount = uint32(num) case 11: @@ -588,8 +657,8 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { return err } startPos := pos - length - orig.Events = append(orig.Events, NewOrigSpan_Event()) - err = UnmarshalProtoOrigSpan_Event(orig.Events[len(orig.Events)-1], buf[startPos:pos]) + orig.Events = append(orig.Events, NewSpanEvent()) + err = orig.Events[len(orig.Events)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -603,7 +672,6 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { if err != nil { return err } - orig.DroppedEventsCount = uint32(num) case 13: @@ -616,8 +684,8 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { return err } startPos := pos - length - orig.Links = append(orig.Links, NewOrigSpan_Link()) - err = UnmarshalProtoOrigSpan_Link(orig.Links[len(orig.Links)-1], buf[startPos:pos]) + orig.Links = append(orig.Links, NewSpanLink()) + err = orig.Links[len(orig.Links)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -631,7 +699,6 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { if err != nil { return err } - orig.DroppedLinksCount = uint32(num) case 15: @@ -645,7 +712,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigStatus(&orig.Status, buf[startPos:pos]) + err = orig.Status.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -658,3 +725,41 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error { } return nil } + +func GenTestSpan() *Span { + orig := NewSpan() + orig.TraceId = *GenTestTraceID() + orig.SpanId 
= *GenTestSpanID() + orig.TraceState = "test_tracestate" + orig.ParentSpanId = *GenTestSpanID() + orig.Flags = uint32(13) + orig.Name = "test_name" + orig.Kind = SpanKind(13) + orig.StartTimeUnixNano = uint64(13) + orig.EndTimeUnixNano = uint64(13) + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.DroppedAttributesCount = uint32(13) + orig.Events = []*SpanEvent{{}, GenTestSpanEvent()} + orig.DroppedEventsCount = uint32(13) + orig.Links = []*SpanLink{{}, GenTestSpanLink()} + orig.DroppedLinksCount = uint32(13) + orig.Status = *GenTestStatus() + return orig +} + +func GenTestSpanPtrSlice() []*Span { + orig := make([]*Span, 5) + orig[0] = NewSpan() + orig[1] = GenTestSpan() + orig[2] = NewSpan() + orig[3] = GenTestSpan() + orig[4] = NewSpan() + return orig +} + +func GenTestSpanSlice() []Span { + orig := make([]Span, 5) + orig[1] = *GenTestSpan() + orig[3] = *GenTestSpan() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go new file mode 100644 index 00000000000..2861cf11754 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go @@ -0,0 +1,364 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "encoding/binary" + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +type SpanContext struct { + TraceState string + TraceFlags uint32 + TraceID TraceID + SpanID SpanID + Remote bool +} + +var ( + protoPoolSpanContext = sync.Pool{ + New: func() any { + return &SpanContext{} + }, + } +) + +func NewSpanContext() *SpanContext { + if !UseProtoPooling.IsEnabled() { + return &SpanContext{} + } + return protoPoolSpanContext.Get().(*SpanContext) +} + +func DeleteSpanContext(orig *SpanContext, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + DeleteTraceID(&orig.TraceID, false) + DeleteSpanID(&orig.SpanID, false) + + orig.Reset() + if nullable { + protoPoolSpanContext.Put(orig) + } +} + +func CopySpanContext(dest, src *SpanContext) *SpanContext { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewSpanContext() + } + CopyTraceID(&dest.TraceID, &src.TraceID) + + CopySpanID(&dest.SpanID, &src.SpanID) + + dest.TraceFlags = src.TraceFlags + dest.TraceState = src.TraceState + dest.Remote = src.Remote + + return dest +} + +func CopySpanContextSlice(dest, src []SpanContext) []SpanContext { + var newDest []SpanContext + if cap(dest) < len(src) { + newDest = make([]SpanContext, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSpanContext(&dest[i], false) + } + } + for i := range src { + CopySpanContext(&newDest[i], &src[i]) + } + return newDest +} + +func CopySpanContextPtrSlice(dest, src []*SpanContext) []*SpanContext { + var newDest []*SpanContext + if cap(dest) < len(src) { + newDest = make([]*SpanContext, len(src)) + // Copy old pointers to re-use. 
+ copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSpanContext() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSpanContext(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSpanContext() + } + } + for i := range src { + CopySpanContext(newDest[i], src[i]) + } + return newDest +} + +func (orig *SpanContext) Reset() { + *orig = SpanContext{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *SpanContext) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if !orig.TraceID.IsEmpty() { + dest.WriteObjectField("traceID") + orig.TraceID.MarshalJSON(dest) + } + if !orig.SpanID.IsEmpty() { + dest.WriteObjectField("spanID") + orig.SpanID.MarshalJSON(dest) + } + if orig.TraceFlags != uint32(0) { + dest.WriteObjectField("traceFlags") + dest.WriteUint32(orig.TraceFlags) + } + if orig.TraceState != "" { + dest.WriteObjectField("traceState") + dest.WriteString(orig.TraceState) + } + if orig.Remote != false { + dest.WriteObjectField("remote") + dest.WriteBool(orig.Remote) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *SpanContext) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "traceID", "trace_id": + + orig.TraceID.UnmarshalJSON(iter) + case "spanID", "span_id": + + orig.SpanID.UnmarshalJSON(iter) + case "traceFlags", "trace_flags": + orig.TraceFlags = iter.ReadUint32() + case "traceState", "trace_state": + orig.TraceState = iter.ReadString() + case "remote": + orig.Remote = iter.ReadBool() + default: + iter.Skip() + } + } +} + +func (orig *SpanContext) SizeProto() int { + var n int + var l int + _ = l + l = orig.TraceID.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + l = orig.SpanID.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + if orig.TraceFlags != uint32(0) { + n += 5 + } + + l = len(orig.TraceState) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.Remote != false { + n += 2 + } + return n +} + +func (orig *SpanContext) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = orig.TraceID.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + + l = orig.SpanID.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + + if orig.TraceFlags != uint32(0) { + pos -= 4 + binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.TraceFlags)) + pos-- + buf[pos] = 0x1d + } + l = len(orig.TraceState) + if l > 0 { + pos -= l + copy(buf[pos:], orig.TraceState) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x22 + } + if orig.Remote != false { + pos-- + if orig.Remote { + buf[pos] = 1 + } else { + buf[pos] = 0 + } + pos-- + buf[pos] = 0x28 + } + return len(buf) - pos +} + +func (orig *SpanContext) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.TraceID.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.SpanID.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeI32 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceFlags", wireType) + } + var num uint32 + num, pos, err = proto.ConsumeI32(buf, pos) + if err != nil { + return err + } + + orig.TraceFlags = uint32(num) + + case 4: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.TraceState = string(buf[startPos:pos]) + + case 5: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.Remote = num != 0 + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestSpanContext() *SpanContext { + orig := NewSpanContext() + orig.TraceID = *GenTestTraceID() + orig.SpanID = *GenTestSpanID() + orig.TraceFlags = uint32(13) + orig.TraceState = "test_tracestate" + orig.Remote = true + return orig +} + +func GenTestSpanContextPtrSlice() []*SpanContext { + orig := make([]*SpanContext, 5) + orig[0] = NewSpanContext() + orig[1] = GenTestSpanContext() + orig[2] = NewSpanContext() + orig[3] = GenTestSpanContext() + orig[4] = NewSpanContext() + return orig +} + +func GenTestSpanContextSlice() []SpanContext { + orig := make([]SpanContext, 5) + orig[1] = *GenTestSpanContext() + orig[3] = *GenTestSpanContext() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_event.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go similarity index 52% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_event.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go index 8931b6267cc..56d1ea50b5a 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_event.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go @@ -11,28 +11,35 @@ import ( "fmt" "sync" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. See OTLP for event definition. 
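// Illustrative sketch, not part of the generated patch: the hard-coded tag
// bytes in SpanContext.MarshalProto above are precomputed protobuf field
// keys, key = (field_number << 3) | wire_type. A hypothetical helper that
// reproduces them:
func protoFieldKey(fieldNum, wireType uint8) uint8 {
	return fieldNum<<3 | wireType
}

// protoFieldKey(1, 2) == 0x0a // TraceID, LEN
// protoFieldKey(2, 2) == 0x12 // SpanID, LEN
// protoFieldKey(3, 5) == 0x1d // TraceFlags, I32 (fixed32)
// protoFieldKey(4, 2) == 0x22 // TraceState, LEN
// protoFieldKey(5, 0) == 0x28 // Remote, VARINT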
+type SpanEvent struct { + Name string + Attributes []KeyValue + TimeUnixNano uint64 + DroppedAttributesCount uint32 +} + var ( - protoPoolSpan_Event = sync.Pool{ + protoPoolSpanEvent = sync.Pool{ New: func() any { - return &otlptrace.Span_Event{} + return &SpanEvent{} }, } ) -func NewOrigSpan_Event() *otlptrace.Span_Event { +func NewSpanEvent() *SpanEvent { if !UseProtoPooling.IsEnabled() { - return &otlptrace.Span_Event{} + return &SpanEvent{} } - return protoPoolSpan_Event.Get().(*otlptrace.Span_Event) + return protoPoolSpanEvent.Get().(*SpanEvent) } -func DeleteOrigSpan_Event(orig *otlptrace.Span_Event, nullable bool) { +func DeleteSpanEvent(orig *SpanEvent, nullable bool) { if orig == nil { return } @@ -43,37 +50,91 @@ func DeleteOrigSpan_Event(orig *otlptrace.Span_Event, nullable bool) { } for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) + DeleteKeyValue(&orig.Attributes[i], false) } orig.Reset() if nullable { - protoPoolSpan_Event.Put(orig) + protoPoolSpanEvent.Put(orig) } } -func CopyOrigSpan_Event(dest, src *otlptrace.Span_Event) { +func CopySpanEvent(dest, src *SpanEvent) *SpanEvent { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewSpanEvent() } dest.TimeUnixNano = src.TimeUnixNano dest.Name = src.Name - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + dest.DroppedAttributesCount = src.DroppedAttributesCount + + return dest } -func GenTestOrigSpan_Event() *otlptrace.Span_Event { - orig := NewOrigSpan_Event() - orig.TimeUnixNano = 1234567890 - orig.Name = "test_name" - orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.DroppedAttributesCount = uint32(13) - return orig +func CopySpanEventSlice(dest, src []SpanEvent) []SpanEvent { + var newDest []SpanEvent + if cap(dest) < len(src) { + newDest = make([]SpanEvent, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSpanEvent(&dest[i], false) + } + } + for i := range src { + CopySpanEvent(&newDest[i], &src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, dest *json.Stream) { +func CopySpanEventPtrSlice(dest, src []*SpanEvent) []*SpanEvent { + var newDest []*SpanEvent + if cap(dest) < len(src) { + newDest = make([]*SpanEvent, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSpanEvent() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSpanEvent(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). 
+ for i := len(dest); i < len(src); i++ { + newDest[i] = NewSpanEvent() + } + } + for i := range src { + CopySpanEvent(newDest[i], src[i]) + } + return newDest +} + +func (orig *SpanEvent) Reset() { + *orig = SpanEvent{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *SpanEvent) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if orig.TimeUnixNano != uint64(0) { dest.WriteObjectField("timeUnixNano") @@ -86,10 +147,10 @@ func MarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, dest *json.Stream) { if len(orig.Attributes) > 0 { dest.WriteObjectField("attributes") dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Attributes[0], dest) + orig.Attributes[0].MarshalJSON(dest) for i := 1; i < len(orig.Attributes); i++ { dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Attributes[i], dest) + orig.Attributes[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -100,8 +161,8 @@ func MarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, dest *json.Stream) { dest.WriteObjectEnd() } -// UnmarshalJSONOrigSpanEvent unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *SpanEvent) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "timeUnixNano", "time_unix_nano": @@ -110,8 +171,8 @@ func UnmarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, iter *json.Iterator orig.Name = iter.ReadString() case "attributes": for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) } case "droppedAttributesCount", "dropped_attributes_count": @@ -122,32 +183,33 @@ func UnmarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, iter *json.Iterator } } -func SizeProtoOrigSpan_Event(orig *otlptrace.Span_Event) int { +func (orig *SpanEvent) SizeProto() int { var n int var l int _ = l - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { n += 9 } + l = len(orig.Name) if l > 0 { n += 1 + proto.Sov(uint64(l)) + l } for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) + l = orig.Attributes[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount)) } return n } -func MarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) int { +func (orig *SpanEvent) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) pos-- @@ -162,13 +224,13 @@ func MarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) int { buf[pos] = 0x12 } for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) + l = orig.Attributes[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x1a } - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount)) pos-- buf[pos] 
= 0x20 @@ -176,7 +238,7 @@ func MarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) int { return len(buf) - pos } -func UnmarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) error { +func (orig *SpanEvent) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -225,8 +287,8 @@ func UnmarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) error return err } startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -240,7 +302,6 @@ func UnmarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) error if err != nil { return err } - orig.DroppedAttributesCount = uint32(num) default: pos, err = proto.ConsumeUnknown(buf, pos, wireType) @@ -251,3 +312,29 @@ func UnmarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) error } return nil } + +func GenTestSpanEvent() *SpanEvent { + orig := NewSpanEvent() + orig.TimeUnixNano = uint64(13) + orig.Name = "test_name" + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.DroppedAttributesCount = uint32(13) + return orig +} + +func GenTestSpanEventPtrSlice() []*SpanEvent { + orig := make([]*SpanEvent, 5) + orig[0] = NewSpanEvent() + orig[1] = GenTestSpanEvent() + orig[2] = NewSpanEvent() + orig[3] = GenTestSpanEvent() + orig[4] = NewSpanEvent() + return orig +} + +func GenTestSpanEventSlice() []SpanEvent { + orig := make([]SpanEvent, 5) + orig[1] = *GenTestSpanEvent() + orig[3] = *GenTestSpanEvent() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_link.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go similarity index 53% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_link.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go index 0adb3bf260d..a7f8a670db4 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_link.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go @@ -11,29 +11,38 @@ import ( "fmt" "sync" - "go.opentelemetry.io/collector/pdata/internal/data" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// SpanLink is a pointer from the current span to another span in the same trace or in a +// different trace. 
+// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto +type SpanLink struct { + TraceState string + Attributes []KeyValue + DroppedAttributesCount uint32 + Flags uint32 + TraceId TraceID + SpanId SpanID +} + var ( - protoPoolSpan_Link = sync.Pool{ + protoPoolSpanLink = sync.Pool{ New: func() any { - return &otlptrace.Span_Link{} + return &SpanLink{} }, } ) -func NewOrigSpan_Link() *otlptrace.Span_Link { +func NewSpanLink() *SpanLink { if !UseProtoPooling.IsEnabled() { - return &otlptrace.Span_Link{} + return &SpanLink{} } - return protoPoolSpan_Link.Get().(*otlptrace.Span_Link) + return protoPoolSpanLink.Get().(*SpanLink) } -func DeleteOrigSpan_Link(orig *otlptrace.Span_Link, nullable bool) { +func DeleteSpanLink(orig *SpanLink, nullable bool) { if orig == nil { return } @@ -42,53 +51,107 @@ func DeleteOrigSpan_Link(orig *otlptrace.Span_Link, nullable bool) { orig.Reset() return } + DeleteTraceID(&orig.TraceId, false) + DeleteSpanID(&orig.SpanId, false) - DeleteOrigTraceID(&orig.TraceId, false) - DeleteOrigSpanID(&orig.SpanId, false) for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) + DeleteKeyValue(&orig.Attributes[i], false) } orig.Reset() if nullable { - protoPoolSpan_Link.Put(orig) + protoPoolSpanLink.Put(orig) } } -func CopyOrigSpan_Link(dest, src *otlptrace.Span_Link) { +func CopySpanLink(dest, src *SpanLink) *SpanLink { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewSpanLink() } - dest.TraceId = src.TraceId - dest.SpanId = src.SpanId - CopyOrigTraceState(&dest.TraceState, &src.TraceState) - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) + CopyTraceID(&dest.TraceId, &src.TraceId) + + CopySpanID(&dest.SpanId, &src.SpanId) + + dest.TraceState = src.TraceState + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + dest.DroppedAttributesCount = src.DroppedAttributesCount dest.Flags = src.Flags + + return dest } -func GenTestOrigSpan_Link() *otlptrace.Span_Link { - orig := NewOrigSpan_Link() - orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) - orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) - orig.TraceState = *GenTestOrigTraceState() - orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.DroppedAttributesCount = uint32(13) - orig.Flags = uint32(13) - return orig +func CopySpanLinkSlice(dest, src []SpanLink) []SpanLink { + var newDest []SpanLink + if cap(dest) < len(src) { + newDest = make([]SpanLink, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSpanLink(&dest[i], false) + } + } + for i := range src { + CopySpanLink(&newDest[i], &src[i]) + } + return newDest +} + +func CopySpanLinkPtrSlice(dest, src []*SpanLink) []*SpanLink { + var newDest []*SpanLink + if cap(dest) < len(src) { + newDest = make([]*SpanLink, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSpanLink() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
+ for i := len(src); i < len(dest); i++ { + DeleteSpanLink(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSpanLink() + } + } + for i := range src { + CopySpanLink(newDest[i], src[i]) + } + return newDest +} + +func (orig *SpanLink) Reset() { + *orig = SpanLink{} } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, dest *json.Stream) { +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *SpanLink) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() - if orig.TraceId != data.TraceID([16]byte{}) { + if !orig.TraceId.IsEmpty() { dest.WriteObjectField("traceId") - MarshalJSONOrigTraceID(&orig.TraceId, dest) + orig.TraceId.MarshalJSON(dest) } - if orig.SpanId != data.SpanID([8]byte{}) { + if !orig.SpanId.IsEmpty() { dest.WriteObjectField("spanId") - MarshalJSONOrigSpanID(&orig.SpanId, dest) + orig.SpanId.MarshalJSON(dest) } if orig.TraceState != "" { dest.WriteObjectField("traceState") @@ -97,10 +160,10 @@ func MarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, dest *json.Stream) { if len(orig.Attributes) > 0 { dest.WriteObjectField("attributes") dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Attributes[0], dest) + orig.Attributes[0].MarshalJSON(dest) for i := 1; i < len(orig.Attributes); i++ { dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Attributes[i], dest) + orig.Attributes[i].MarshalJSON(dest) } dest.WriteArrayEnd() } @@ -115,20 +178,22 @@ func MarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, dest *json.Stream) { dest.WriteObjectEnd() } -// UnmarshalJSONOrigSpanLink unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
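// Illustrative sketch, not part of the generated patch: CopySpanLinkPtrSlice
// above reuses destination capacity. With cap(dest) >= len(src) it shrinks or
// grows in place, returning discarded elements to the pool; otherwise it
// allocates a fresh backing array while still reusing the old pointer
// targets via copy(newDest, dest). A hypothetical caller:
func copyLinksReusingCapacity() []*SpanLink {
	src := GenTestSpanLinkPtrSlice() // five elements, empty and populated interleaved
	dst := make([]*SpanLink, 0, 8)   // capacity suffices, so no reallocation occurs
	dst = CopySpanLinkPtrSlice(dst, src)
	return dst
}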
+func (orig *SpanLink) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "traceId", "trace_id": - UnmarshalJSONOrigTraceID(&orig.TraceId, iter) + + orig.TraceId.UnmarshalJSON(iter) case "spanId", "span_id": - UnmarshalJSONOrigSpanID(&orig.SpanId, iter) + + orig.SpanId.UnmarshalJSON(iter) case "traceState", "trace_state": orig.TraceState = iter.ReadString() case "attributes": for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) } case "droppedAttributesCount", "dropped_attributes_count": @@ -141,43 +206,43 @@ func UnmarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, iter *json.Iterator) } } -func SizeProtoOrigSpan_Link(orig *otlptrace.Span_Link) int { +func (orig *SpanLink) SizeProto() int { var n int var l int _ = l - l = SizeProtoOrigTraceID(&orig.TraceId) + l = orig.TraceId.SizeProto() n += 1 + proto.Sov(uint64(l)) + l - l = SizeProtoOrigSpanID(&orig.SpanId) + l = orig.SpanId.SizeProto() n += 1 + proto.Sov(uint64(l)) + l + l = len(orig.TraceState) if l > 0 { n += 1 + proto.Sov(uint64(l)) + l } for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) + l = orig.Attributes[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount)) } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { n += 5 } return n } -func MarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) int { +func (orig *SpanLink) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l - - l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos]) + l = orig.TraceId.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0xa - l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos]) + l = orig.SpanId.MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- @@ -192,18 +257,18 @@ func MarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) int { buf[pos] = 0x1a } for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) + l = orig.Attributes[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x22 } - if orig.DroppedAttributesCount != 0 { + if orig.DroppedAttributesCount != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount)) pos-- buf[pos] = 0x28 } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { pos -= 4 binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags)) pos-- @@ -212,7 +277,7 @@ func MarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) int { return len(buf) - pos } -func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error { +func (orig *SpanLink) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -238,7 +303,7 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error { } startPos := pos - length - err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos]) + err = orig.TraceId.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -254,7 +319,7 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error { } 
startPos := pos - length - err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos]) + err = orig.SpanId.UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -281,8 +346,8 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error { return err } startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -296,7 +361,6 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error { if err != nil { return err } - orig.DroppedAttributesCount = uint32(num) case 6: @@ -319,3 +383,31 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error { } return nil } + +func GenTestSpanLink() *SpanLink { + orig := NewSpanLink() + orig.TraceId = *GenTestTraceID() + orig.SpanId = *GenTestSpanID() + orig.TraceState = "test_tracestate" + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.DroppedAttributesCount = uint32(13) + orig.Flags = uint32(13) + return orig +} + +func GenTestSpanLinkPtrSlice() []*SpanLink { + orig := make([]*SpanLink, 5) + orig[0] = NewSpanLink() + orig[1] = GenTestSpanLink() + orig[2] = NewSpanLink() + orig[3] = GenTestSpanLink() + orig[4] = NewSpanLink() + return orig +} + +func GenTestSpanLinkSlice() []SpanLink { + orig := make([]SpanLink, 5) + orig[1] = *GenTestSpanLink() + orig[3] = *GenTestSpanLink() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stack.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go similarity index 53% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stack.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go index 6b230b30c6f..be7a4ce48a7 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stack.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go @@ -10,27 +10,32 @@ import ( "fmt" "sync" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// Stack represents a stack trace as a list of locations. + +type Stack struct { + LocationIndices []int32 +} + var ( protoPoolStack = sync.Pool{ New: func() any { - return &otlpprofiles.Stack{} + return &Stack{} }, } ) -func NewOrigStack() *otlpprofiles.Stack { +func NewStack() *Stack { if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.Stack{} + return &Stack{} } - return protoPoolStack.Get().(*otlpprofiles.Stack) + return protoPoolStack.Get().(*Stack) } -func DeleteOrigStack(orig *otlpprofiles.Stack, nullable bool) { +func DeleteStack(orig *Stack, nullable bool) { if orig == nil { return } @@ -46,22 +51,78 @@ func DeleteOrigStack(orig *otlpprofiles.Stack, nullable bool) { } } -func CopyOrigStack(dest, src *otlpprofiles.Stack) { +func CopyStack(dest, src *Stack) *Stack { // If copying to same object, just return. 
if src == dest { - return + return dest } - dest.LocationIndices = CopyOrigInt32Slice(dest.LocationIndices, src.LocationIndices) + + if src == nil { + return nil + } + + if dest == nil { + dest = NewStack() + } + dest.LocationIndices = append(dest.LocationIndices[:0], src.LocationIndices...) + + return dest } -func GenTestOrigStack() *otlpprofiles.Stack { - orig := NewOrigStack() - orig.LocationIndices = GenerateOrigTestInt32Slice() - return orig +func CopyStackSlice(dest, src []Stack) []Stack { + var newDest []Stack + if cap(dest) < len(src) { + newDest = make([]Stack, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteStack(&dest[i], false) + } + } + for i := range src { + CopyStack(&newDest[i], &src[i]) + } + return newDest } -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigStack(orig *otlpprofiles.Stack, dest *json.Stream) { +func CopyStackPtrSlice(dest, src []*Stack) []*Stack { + var newDest []*Stack + if cap(dest) < len(src) { + newDest = make([]*Stack, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewStack() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteStack(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewStack() + } + } + for i := range src { + CopyStack(newDest[i], src[i]) + } + return newDest +} + +func (orig *Stack) Reset() { + *orig = Stack{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Stack) MarshalJSON(dest *json.Stream) { dest.WriteObjectStart() if len(orig.LocationIndices) > 0 { dest.WriteObjectField("locationIndices") @@ -73,11 +134,12 @@ func MarshalJSONOrigStack(orig *otlpprofiles.Stack, dest *json.Stream) { } dest.WriteArrayEnd() } + dest.WriteObjectEnd() } -// UnmarshalJSONOrigStack unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigStack(orig *otlpprofiles.Stack, iter *json.Iterator) { +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
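// Illustrative sketch, not part of the generated patch: Stack encodes
// LocationIndices packed, i.e. one LEN-keyed field whose payload is the
// concatenation of all indices as varints, which is why Stack.SizeProto
// below adds a single length prefix and sums proto.Sov over the elements.
// proto.Sov is assumed to be the usual "size of varint" helper, equivalent to:
func sizeOfVarint(x uint64) int {
	n := 1
	for x >= 0x80 { // every varint byte carries 7 payload bits
		x >>= 7
		n++
	}
	return n
}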
+func (orig *Stack) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "locationIndices", "location_indices": @@ -91,10 +153,11 @@ func UnmarshalJSONOrigStack(orig *otlpprofiles.Stack, iter *json.Iterator) { } } -func SizeProtoOrigStack(orig *otlpprofiles.Stack) int { +func (orig *Stack) SizeProto() int { var n int var l int _ = l + if len(orig.LocationIndices) > 0 { l = 0 for _, e := range orig.LocationIndices { @@ -105,7 +168,7 @@ func SizeProtoOrigStack(orig *otlpprofiles.Stack) int { return n } -func MarshalProtoOrigStack(orig *otlpprofiles.Stack, buf []byte) int { +func (orig *Stack) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l @@ -122,7 +185,7 @@ func MarshalProtoOrigStack(orig *otlpprofiles.Stack, buf []byte) int { return len(buf) - pos } -func UnmarshalProtoOrigStack(orig *otlpprofiles.Stack, buf []byte) error { +func (orig *Stack) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -175,3 +238,26 @@ func UnmarshalProtoOrigStack(orig *otlpprofiles.Stack, buf []byte) error { } return nil } + +func GenTestStack() *Stack { + orig := NewStack() + orig.LocationIndices = []int32{int32(0), int32(13)} + return orig +} + +func GenTestStackPtrSlice() []*Stack { + orig := make([]*Stack, 5) + orig[0] = NewStack() + orig[1] = GenTestStack() + orig[2] = NewStack() + orig[3] = GenTestStack() + orig[4] = NewStack() + return orig +} + +func GenTestStackSlice() []Stack { + orig := make([]Stack, 5) + orig[1] = *GenTestStack() + orig[3] = *GenTestStack() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go new file mode 100644 index 00000000000..77b5fe39268 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go @@ -0,0 +1,259 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Status is an optional final status for this span. Semantically, when Status was not +// set, that means the span ended without errors and to assume Status.Ok (code = 0). +type Status struct { + Message string + Code StatusCode +} + +var ( + protoPoolStatus = sync.Pool{ + New: func() any { + return &Status{} + }, + } +) + +func NewStatus() *Status { + if !UseProtoPooling.IsEnabled() { + return &Status{} + } + return protoPoolStatus.Get().(*Status) +} + +func DeleteStatus(orig *Status, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + + orig.Reset() + if nullable { + protoPoolStatus.Put(orig) + } +} + +func CopyStatus(dest, src *Status) *Status { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewStatus() + } + dest.Message = src.Message + dest.Code = src.Code + + return dest +} + +func CopyStatusSlice(dest, src []Status) []Status { + var newDest []Status + if cap(dest) < len(src) { + newDest = make([]Status, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. 
+ // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteStatus(&dest[i], false) + } + } + for i := range src { + CopyStatus(&newDest[i], &src[i]) + } + return newDest +} + +func CopyStatusPtrSlice(dest, src []*Status) []*Status { + var newDest []*Status + if cap(dest) < len(src) { + newDest = make([]*Status, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewStatus() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteStatus(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewStatus() + } + } + for i := range src { + CopyStatus(newDest[i], src[i]) + } + return newDest +} + +func (orig *Status) Reset() { + *orig = Status{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Status) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.Message != "" { + dest.WriteObjectField("message") + dest.WriteString(orig.Message) + } + + if int32(orig.Code) != 0 { + dest.WriteObjectField("code") + dest.WriteInt32(int32(orig.Code)) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *Status) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "message": + orig.Message = iter.ReadString() + case "code": + orig.Code = StatusCode(iter.ReadEnumValue(StatusCode_value)) + default: + iter.Skip() + } + } +} + +func (orig *Status) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.Message) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.Code != StatusCode(0) { + n += 1 + proto.Sov(uint64(orig.Code)) + } + return n +} + +func (orig *Status) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.Message) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Message) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + if orig.Code != StatusCode(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.Code)) + pos-- + buf[pos] = 0x18 + } + return len(buf) - pos +} + +func (orig *Status) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Message = string(buf[startPos:pos]) + + case 3: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.Code = StatusCode(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestStatus() *Status { + orig := NewStatus() + orig.Message = "test_message" + orig.Code = StatusCode(13) + return orig +} + +func GenTestStatusPtrSlice() []*Status { + orig := make([]*Status, 5) + orig[0] = NewStatus() + orig[1] = GenTestStatus() + orig[2] = NewStatus() + orig[3] = GenTestStatus() + orig[4] = NewStatus() + return orig +} + +func GenTestStatusSlice() []Status { + orig := make([]Status, 5) + orig[1] = *GenTestStatus() + orig[3] = *GenTestStatus() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go new file mode 100644 index 00000000000..f419344b54d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go @@ -0,0 +1,307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval. +type Sum struct { + DataPoints []*NumberDataPoint + AggregationTemporality AggregationTemporality + IsMonotonic bool +} + +var ( + protoPoolSum = sync.Pool{ + New: func() any { + return &Sum{} + }, + } +) + +func NewSum() *Sum { + if !UseProtoPooling.IsEnabled() { + return &Sum{} + } + return protoPoolSum.Get().(*Sum) +} + +func DeleteSum(orig *Sum, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.DataPoints { + DeleteNumberDataPoint(orig.DataPoints[i], true) + } + + orig.Reset() + if nullable { + protoPoolSum.Put(orig) + } +} + +func CopySum(dest, src *Sum) *Sum { + // If copying to same object, just return. + if src == dest { + return dest + } + + if src == nil { + return nil + } + + if dest == nil { + dest = NewSum() + } + dest.DataPoints = CopyNumberDataPointPtrSlice(dest.DataPoints, src.DataPoints) + + dest.AggregationTemporality = src.AggregationTemporality + dest.IsMonotonic = src.IsMonotonic + + return dest +} + +func CopySumSlice(dest, src []Sum) []Sum { + var newDest []Sum + if cap(dest) < len(src) { + newDest = make([]Sum, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). 
+ for i := len(src); i < len(dest); i++ { + DeleteSum(&dest[i], false) + } + } + for i := range src { + CopySum(&newDest[i], &src[i]) + } + return newDest +} + +func CopySumPtrSlice(dest, src []*Sum) []*Sum { + var newDest []*Sum + if cap(dest) < len(src) { + newDest = make([]*Sum, len(src)) + // Copy old pointers to re-use. + copy(newDest, dest) + // Add new pointers for missing elements from len(dest) to len(srt). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSum() + } + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSum(dest[i], true) + dest[i] = nil + } + // Add new pointers for missing elements. + // This can happen when len(dest) < len(src) < cap(dest). + for i := len(dest); i < len(src); i++ { + newDest[i] = NewSum() + } + } + for i := range src { + CopySum(newDest[i], src[i]) + } + return newDest +} + +func (orig *Sum) Reset() { + *orig = Sum{} +} + +// MarshalJSON marshals all properties from the current struct to the destination stream. +func (orig *Sum) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if len(orig.DataPoints) > 0 { + dest.WriteObjectField("dataPoints") + dest.WriteArrayStart() + orig.DataPoints[0].MarshalJSON(dest) + for i := 1; i < len(orig.DataPoints); i++ { + dest.WriteMore() + orig.DataPoints[i].MarshalJSON(dest) + } + dest.WriteArrayEnd() + } + + if int32(orig.AggregationTemporality) != 0 { + dest.WriteObjectField("aggregationTemporality") + dest.WriteInt32(int32(orig.AggregationTemporality)) + } + if orig.IsMonotonic != false { + dest.WriteObjectField("isMonotonic") + dest.WriteBool(orig.IsMonotonic) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. 
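// Illustrative sketch, not part of the generated patch: the New*/Delete*
// pairs above touch sync.Pool only behind the UseProtoPooling feature gate;
// with the gate disabled they reduce to plain allocation plus Reset. A
// hypothetical scratch-object lifecycle for Sum:
func withScratchSum(f func(*Sum)) {
	s := NewSum() // comes from protoPoolSum iff UseProtoPooling is enabled
	f(s)
	// nullable=true marks s itself as returnable to its pool; owned pooled
	// pointers (the NumberDataPoint elements) are released first.
	DeleteSum(s, true)
}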
+func (orig *Sum) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "dataPoints", "data_points": + for iter.ReadArray() { + orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint()) + orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter) + } + + case "aggregationTemporality", "aggregation_temporality": + orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value)) + case "isMonotonic", "is_monotonic": + orig.IsMonotonic = iter.ReadBool() + default: + iter.Skip() + } + } +} + +func (orig *Sum) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.DataPoints { + l = orig.DataPoints[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.AggregationTemporality != AggregationTemporality(0) { + n += 1 + proto.Sov(uint64(orig.AggregationTemporality)) + } + if orig.IsMonotonic != false { + n += 2 + } + return n +} + +func (orig *Sum) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.DataPoints) - 1; i >= 0; i-- { + l = orig.DataPoints[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + if orig.AggregationTemporality != AggregationTemporality(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality)) + pos-- + buf[pos] = 0x10 + } + if orig.IsMonotonic != false { + pos-- + if orig.IsMonotonic { + buf[pos] = 1 + } else { + buf[pos] = 0 + } + pos-- + buf[pos] = 0x18 + } + return len(buf) - pos +} + +func (orig *Sum) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. 
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint()) + err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 2: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.AggregationTemporality = AggregationTemporality(num) + + case 3: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.IsMonotonic = num != 0 + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestSum() *Sum { + orig := NewSum() + orig.DataPoints = []*NumberDataPoint{{}, GenTestNumberDataPoint()} + orig.AggregationTemporality = AggregationTemporality(13) + orig.IsMonotonic = true + return orig +} + +func GenTestSumPtrSlice() []*Sum { + orig := make([]*Sum, 5) + orig[0] = NewSum() + orig[1] = GenTestSum() + orig[2] = NewSum() + orig[3] = GenTestSum() + orig[4] = NewSum() + return orig +} + +func GenTestSumSlice() []Sum { + orig := make([]Sum, 5) + orig[1] = *GenTestSum() + orig[3] = *GenTestSum() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go new file mode 100644 index 00000000000..5523e5af8b3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval. +type Summary struct { + DataPoints []*SummaryDataPoint +} + +var ( + protoPoolSummary = sync.Pool{ + New: func() any { + return &Summary{} + }, + } +) + +func NewSummary() *Summary { + if !UseProtoPooling.IsEnabled() { + return &Summary{} + } + return protoPoolSummary.Get().(*Summary) +} + +func DeleteSummary(orig *Summary, nullable bool) { + if orig == nil { + return + } + + if !UseProtoPooling.IsEnabled() { + orig.Reset() + return + } + for i := range orig.DataPoints { + DeleteSummaryDataPoint(orig.DataPoints[i], true) + } + orig.Reset() + if nullable { + protoPoolSummary.Put(orig) + } +} + +func CopySummary(dest, src *Summary) *Summary { + // If copying to same object, just return. 
+	if src == dest {
+		return dest
+	}
+
+	if src == nil {
+		return nil
+	}
+
+	if dest == nil {
+		dest = NewSummary()
+	}
+	dest.DataPoints = CopySummaryDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+	return dest
+}
+
+func CopySummarySlice(dest, src []Summary) []Summary {
+	var newDest []Summary
+	if cap(dest) < len(src) {
+		newDest = make([]Summary, len(src))
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteSummary(&dest[i], false)
+		}
+	}
+	for i := range src {
+		CopySummary(&newDest[i], &src[i])
+	}
+	return newDest
+}
+
+func CopySummaryPtrSlice(dest, src []*Summary) []*Summary {
+	var newDest []*Summary
+	if cap(dest) < len(src) {
+		newDest = make([]*Summary, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewSummary()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteSummary(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewSummary()
+		}
+	}
+	for i := range src {
+		CopySummary(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *Summary) Reset() {
+	*orig = Summary{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Summary) MarshalJSON(dest *json.Stream) {
+	dest.WriteObjectStart()
+	if len(orig.DataPoints) > 0 {
+		dest.WriteObjectField("dataPoints")
+		dest.WriteArrayStart()
+		orig.DataPoints[0].MarshalJSON(dest)
+		for i := 1; i < len(orig.DataPoints); i++ {
+			dest.WriteMore()
+			orig.DataPoints[i].MarshalJSON(dest)
+		}
+		dest.WriteArrayEnd()
+	}
+	dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Summary) UnmarshalJSON(iter *json.Iterator) {
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "dataPoints", "data_points":
+			for iter.ReadArray() {
+				orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
+				orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+			}
+
+		default:
+			iter.Skip()
+		}
+	}
+}
+
+func (orig *Summary) SizeProto() int {
+	var n int
+	var l int
+	_ = l
+	for i := range orig.DataPoints {
+		l = orig.DataPoints[i].SizeProto()
+		n += 1 + proto.Sov(uint64(l)) + l
+	}
+	return n
+}
+
+func (orig *Summary) MarshalProto(buf []byte) int {
+	pos := len(buf)
+	var l int
+	_ = l
+	for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+		l = orig.DataPoints[i].MarshalProto(buf[:pos])
+		pos -= l
+		pos = proto.EncodeVarint(buf, pos, uint64(l))
+		pos--
+		buf[pos] = 0xa
+	}
+	return len(buf) - pos
+}
+
+func (orig *Summary) UnmarshalProto(buf []byte) error {
+	var err error
+	var fieldNum int32
+	var wireType proto.WireType
+
+	l := len(buf)
+	pos := 0
+	for pos < l {
+		// If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint()) + err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestSummary() *Summary { + orig := NewSummary() + orig.DataPoints = []*SummaryDataPoint{{}, GenTestSummaryDataPoint()} + return orig +} + +func GenTestSummaryPtrSlice() []*Summary { + orig := make([]*Summary, 5) + orig[0] = NewSummary() + orig[1] = GenTestSummary() + orig[2] = NewSummary() + orig[3] = GenTestSummary() + orig[4] = NewSummary() + return orig +} + +func GenTestSummarySlice() []Summary { + orig := make([]Summary, 5) + orig[1] = *GenTestSummary() + orig[3] = *GenTestSummary() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go similarity index 53% rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go index 35b4132010a..ccd99fa1801 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go @@ -12,28 +12,37 @@ import ( "math" "sync" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/proto" ) +// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values. 
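+// The struct field order below differs from the protobuf field numbers (Attributes is field 7 on the wire); MarshalProto writes the tags explicitly for each field.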
+type SummaryDataPoint struct { + Attributes []KeyValue + QuantileValues []*SummaryDataPointValueAtQuantile + StartTimeUnixNano uint64 + TimeUnixNano uint64 + Count uint64 + Sum float64 + Flags uint32 +} + var ( protoPoolSummaryDataPoint = sync.Pool{ New: func() any { - return &otlpmetrics.SummaryDataPoint{} + return &SummaryDataPoint{} }, } ) -func NewOrigSummaryDataPoint() *otlpmetrics.SummaryDataPoint { +func NewSummaryDataPoint() *SummaryDataPoint { if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.SummaryDataPoint{} + return &SummaryDataPoint{} } - return protoPoolSummaryDataPoint.Get().(*otlpmetrics.SummaryDataPoint) + return protoPoolSummaryDataPoint.Get().(*SummaryDataPoint) } -func DeleteOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, nullable bool) { +func DeleteSummaryDataPoint(orig *SummaryDataPoint, nullable bool) { if orig == nil { return } @@ -42,12 +51,12 @@ func DeleteOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, nullable boo orig.Reset() return } - for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) + DeleteKeyValue(&orig.Attributes[i], false) } + for i := range orig.QuantileValues { - DeleteOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], true) + DeleteSummaryDataPointValueAtQuantile(orig.QuantileValues[i], true) } orig.Reset() @@ -56,42 +65,94 @@ func DeleteOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, nullable boo } } -func CopyOrigSummaryDataPoint(dest, src *otlpmetrics.SummaryDataPoint) { +func CopySummaryDataPoint(dest, src *SummaryDataPoint) *SummaryDataPoint { // If copying to same object, just return. if src == dest { - return + return dest + } + + if src == nil { + return nil } - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) + + if dest == nil { + dest = NewSummaryDataPoint() + } + dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes) + dest.StartTimeUnixNano = src.StartTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano dest.Count = src.Count dest.Sum = src.Sum - dest.QuantileValues = CopyOrigSummaryDataPoint_ValueAtQuantileSlice(dest.QuantileValues, src.QuantileValues) + dest.QuantileValues = CopySummaryDataPointValueAtQuantilePtrSlice(dest.QuantileValues, src.QuantileValues) + dest.Flags = src.Flags + + return dest } -func GenTestOrigSummaryDataPoint() *otlpmetrics.SummaryDataPoint { - orig := NewOrigSummaryDataPoint() - orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.StartTimeUnixNano = 1234567890 - orig.TimeUnixNano = 1234567890 - orig.Count = uint64(13) - orig.Sum = float64(3.1415926) - orig.QuantileValues = GenerateOrigTestSummaryDataPoint_ValueAtQuantileSlice() - orig.Flags = 1 - return orig +func CopySummaryDataPointSlice(dest, src []SummaryDataPoint) []SummaryDataPoint { + var newDest []SummaryDataPoint + if cap(dest) < len(src) { + newDest = make([]SummaryDataPoint, len(src)) + } else { + newDest = dest[:len(src)] + // Cleanup the rest of the elements so GC can free the memory. + // This can happen when len(src) < len(dest) < cap(dest). + for i := len(src); i < len(dest); i++ { + DeleteSummaryDataPoint(&dest[i], false) + } + } + for i := range src { + CopySummaryDataPoint(&newDest[i], &src[i]) + } + return newDest +} + +func CopySummaryDataPointPtrSlice(dest, src []*SummaryDataPoint) []*SummaryDataPoint { + var newDest []*SummaryDataPoint + if cap(dest) < len(src) { + newDest = make([]*SummaryDataPoint, len(src)) + // Copy old pointers to re-use. 
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewSummaryDataPoint()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteSummaryDataPoint(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewSummaryDataPoint()
+		}
+	}
+	for i := range src {
+		CopySummaryDataPoint(newDest[i], src[i])
+	}
+	return newDest
 }
 
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, dest *json.Stream) {
+func (orig *SummaryDataPoint) Reset() {
+	*orig = SummaryDataPoint{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SummaryDataPoint) MarshalJSON(dest *json.Stream) {
 	dest.WriteObjectStart()
 	if len(orig.Attributes) > 0 {
 		dest.WriteObjectField("attributes")
 		dest.WriteArrayStart()
-		MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+		orig.Attributes[0].MarshalJSON(dest)
 		for i := 1; i < len(orig.Attributes); i++ {
 			dest.WriteMore()
-			MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+			orig.Attributes[i].MarshalJSON(dest)
 		}
 		dest.WriteArrayEnd()
 	}
@@ -114,10 +175,10 @@ func MarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, dest *j
 	if len(orig.QuantileValues) > 0 {
 		dest.WriteObjectField("quantileValues")
 		dest.WriteArrayStart()
-		MarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[0], dest)
+		orig.QuantileValues[0].MarshalJSON(dest)
 		for i := 1; i < len(orig.QuantileValues); i++ {
 			dest.WriteMore()
-			MarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], dest)
+			orig.QuantileValues[i].MarshalJSON(dest)
 		}
 		dest.WriteArrayEnd()
 	}
@@ -128,14 +189,14 @@ func MarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, dest *j
 	dest.WriteObjectEnd()
 }
 
-// UnmarshalJSONOrigSummaryDataPoint unmarshals all properties from the current struct from the source iterator.
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SummaryDataPoint) UnmarshalJSON(iter *json.Iterator) { for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { switch f { case "attributes": for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) + orig.Attributes = append(orig.Attributes, KeyValue{}) + orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter) } case "startTimeUnixNano", "start_time_unix_nano": @@ -148,8 +209,8 @@ func UnmarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, iter orig.Sum = iter.ReadFloat64() case "quantileValues", "quantile_values": for iter.ReadArray() { - orig.QuantileValues = append(orig.QuantileValues, NewOrigSummaryDataPoint_ValueAtQuantile()) - UnmarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[len(orig.QuantileValues)-1], iter) + orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPointValueAtQuantile()) + orig.QuantileValues[len(orig.QuantileValues)-1].UnmarshalJSON(iter) } case "flags": @@ -160,79 +221,79 @@ func UnmarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, iter } } -func SizeProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint) int { +func (orig *SummaryDataPoint) SizeProto() int { var n int var l int _ = l for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) + l = orig.Attributes[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { n += 9 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { n += 9 } - if orig.Count != 0 { + if orig.Count != uint64(0) { n += 9 } - if orig.Sum != 0 { + if orig.Sum != float64(0) { n += 9 } for i := range orig.QuantileValues { - l = SizeProtoOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i]) + l = orig.QuantileValues[i].SizeProto() n += 1 + proto.Sov(uint64(l)) + l } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { n += 1 + proto.Sov(uint64(orig.Flags)) } return n } -func MarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf []byte) int { +func (orig *SummaryDataPoint) MarshalProto(buf []byte) int { pos := len(buf) var l int _ = l for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) + l = orig.Attributes[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x3a } - if orig.StartTimeUnixNano != 0 { + if orig.StartTimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano)) pos-- buf[pos] = 0x11 } - if orig.TimeUnixNano != 0 { + if orig.TimeUnixNano != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) pos-- buf[pos] = 0x19 } - if orig.Count != 0 { + if orig.Count != uint64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count)) pos-- buf[pos] = 0x21 } - if orig.Sum != 0 { + if orig.Sum != float64(0) { pos -= 8 binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum)) pos-- buf[pos] = 0x29 } for i := len(orig.QuantileValues) - 1; i >= 0; i-- { - l = MarshalProtoOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], buf[:pos]) + l = orig.QuantileValues[i].MarshalProto(buf[:pos]) pos -= l pos = proto.EncodeVarint(buf, pos, uint64(l)) pos-- buf[pos] = 0x32 } - if orig.Flags != 0 { + if orig.Flags != uint32(0) { pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags)) pos-- buf[pos] = 0x40 @@ 
-240,7 +301,7 @@ func MarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf [] return len(buf) - pos } -func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf []byte) error { +func (orig *SummaryDataPoint) UnmarshalProto(buf []byte) error { var err error var fieldNum int32 var wireType proto.WireType @@ -265,8 +326,8 @@ func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf return err } startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) + orig.Attributes = append(orig.Attributes, KeyValue{}) + err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -316,7 +377,6 @@ func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf if err != nil { return err } - orig.Sum = math.Float64frombits(num) case 6: @@ -329,8 +389,8 @@ func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf return err } startPos := pos - length - orig.QuantileValues = append(orig.QuantileValues, NewOrigSummaryDataPoint_ValueAtQuantile()) - err = UnmarshalProtoOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[len(orig.QuantileValues)-1], buf[startPos:pos]) + orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPointValueAtQuantile()) + err = orig.QuantileValues[len(orig.QuantileValues)-1].UnmarshalProto(buf[startPos:pos]) if err != nil { return err } @@ -344,7 +404,6 @@ func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf if err != nil { return err } - orig.Flags = uint32(num) default: pos, err = proto.ConsumeUnknown(buf, pos, wireType) @@ -355,3 +414,32 @@ func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf } return nil } + +func GenTestSummaryDataPoint() *SummaryDataPoint { + orig := NewSummaryDataPoint() + orig.Attributes = []KeyValue{{}, *GenTestKeyValue()} + orig.StartTimeUnixNano = uint64(13) + orig.TimeUnixNano = uint64(13) + orig.Count = uint64(13) + orig.Sum = float64(3.1415926) + orig.QuantileValues = []*SummaryDataPointValueAtQuantile{{}, GenTestSummaryDataPointValueAtQuantile()} + orig.Flags = uint32(13) + return orig +} + +func GenTestSummaryDataPointPtrSlice() []*SummaryDataPoint { + orig := make([]*SummaryDataPoint, 5) + orig[0] = NewSummaryDataPoint() + orig[1] = GenTestSummaryDataPoint() + orig[2] = NewSummaryDataPoint() + orig[3] = GenTestSummaryDataPoint() + orig[4] = NewSummaryDataPoint() + return orig +} + +func GenTestSummaryDataPointSlice() []SummaryDataPoint { + orig := make([]SummaryDataPoint, 5) + orig[1] = *GenTestSummaryDataPoint() + orig[3] = *GenTestSummaryDataPoint() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go new file mode 100644 index 00000000000..3b10b8a9c6c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go @@ -0,0 +1,255 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
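+// This file defines SummaryDataPointValueAtQuantile, the quantile/value entry stored in SummaryDataPoint.QuantileValues.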
+
+package internal
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+	"sync"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+	"go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// SummaryDataPointValueAtQuantile is a quantile value within a Summary data point.
+type SummaryDataPointValueAtQuantile struct {
+	Quantile float64
+	Value float64
+}
+
+var (
+	protoPoolSummaryDataPointValueAtQuantile = sync.Pool{
+		New: func() any {
+			return &SummaryDataPointValueAtQuantile{}
+		},
+	}
+)
+
+func NewSummaryDataPointValueAtQuantile() *SummaryDataPointValueAtQuantile {
+	if !UseProtoPooling.IsEnabled() {
+		return &SummaryDataPointValueAtQuantile{}
+	}
+	return protoPoolSummaryDataPointValueAtQuantile.Get().(*SummaryDataPointValueAtQuantile)
+}
+
+func DeleteSummaryDataPointValueAtQuantile(orig *SummaryDataPointValueAtQuantile, nullable bool) {
+	if orig == nil {
+		return
+	}
+
+	if !UseProtoPooling.IsEnabled() {
+		orig.Reset()
+		return
+	}
+
+	orig.Reset()
+	if nullable {
+		protoPoolSummaryDataPointValueAtQuantile.Put(orig)
+	}
+}
+
+func CopySummaryDataPointValueAtQuantile(dest, src *SummaryDataPointValueAtQuantile) *SummaryDataPointValueAtQuantile {
+	// If copying to same object, just return.
+	if src == dest {
+		return dest
+	}
+
+	if src == nil {
+		return nil
+	}
+
+	if dest == nil {
+		dest = NewSummaryDataPointValueAtQuantile()
+	}
+	dest.Quantile = src.Quantile
+	dest.Value = src.Value
+
+	return dest
+}
+
+func CopySummaryDataPointValueAtQuantileSlice(dest, src []SummaryDataPointValueAtQuantile) []SummaryDataPointValueAtQuantile {
+	var newDest []SummaryDataPointValueAtQuantile
+	if cap(dest) < len(src) {
+		newDest = make([]SummaryDataPointValueAtQuantile, len(src))
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteSummaryDataPointValueAtQuantile(&dest[i], false)
+		}
+	}
+	for i := range src {
+		CopySummaryDataPointValueAtQuantile(&newDest[i], &src[i])
+	}
+	return newDest
+}
+
+func CopySummaryDataPointValueAtQuantilePtrSlice(dest, src []*SummaryDataPointValueAtQuantile) []*SummaryDataPointValueAtQuantile {
+	var newDest []*SummaryDataPointValueAtQuantile
+	if cap(dest) < len(src) {
+		newDest = make([]*SummaryDataPointValueAtQuantile, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewSummaryDataPointValueAtQuantile()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteSummaryDataPointValueAtQuantile(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewSummaryDataPointValueAtQuantile()
+		}
+	}
+	for i := range src {
+		CopySummaryDataPointValueAtQuantile(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *SummaryDataPointValueAtQuantile) Reset() {
+	*orig = SummaryDataPointValueAtQuantile{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
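+// Fields at their zero value are omitted from the output.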
+func (orig *SummaryDataPointValueAtQuantile) MarshalJSON(dest *json.Stream) { + dest.WriteObjectStart() + if orig.Quantile != float64(0) { + dest.WriteObjectField("quantile") + dest.WriteFloat64(orig.Quantile) + } + if orig.Value != float64(0) { + dest.WriteObjectField("value") + dest.WriteFloat64(orig.Value) + } + dest.WriteObjectEnd() +} + +// UnmarshalJSON unmarshals all properties from the current struct from the source iterator. +func (orig *SummaryDataPointValueAtQuantile) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "quantile": + orig.Quantile = iter.ReadFloat64() + case "value": + orig.Value = iter.ReadFloat64() + default: + iter.Skip() + } + } +} + +func (orig *SummaryDataPointValueAtQuantile) SizeProto() int { + var n int + var l int + _ = l + if orig.Quantile != float64(0) { + n += 9 + } + if orig.Value != float64(0) { + n += 9 + } + return n +} + +func (orig *SummaryDataPointValueAtQuantile) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.Quantile != float64(0) { + pos -= 8 + binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Quantile)) + pos-- + buf[pos] = 0x9 + } + if orig.Value != float64(0) { + pos -= 8 + binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Value)) + pos-- + buf[pos] = 0x11 + } + return len(buf) - pos +} + +func (orig *SummaryDataPointValueAtQuantile) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeI64 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeI64(buf, pos) + if err != nil { + return err + } + orig.Quantile = math.Float64frombits(num) + + case 2: + if wireType != proto.WireTypeI64 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeI64(buf, pos) + if err != nil { + return err + } + orig.Value = math.Float64frombits(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestSummaryDataPointValueAtQuantile() *SummaryDataPointValueAtQuantile { + orig := NewSummaryDataPointValueAtQuantile() + orig.Quantile = float64(3.1415926) + orig.Value = float64(3.1415926) + return orig +} + +func GenTestSummaryDataPointValueAtQuantilePtrSlice() []*SummaryDataPointValueAtQuantile { + orig := make([]*SummaryDataPointValueAtQuantile, 5) + orig[0] = NewSummaryDataPointValueAtQuantile() + orig[1] = GenTestSummaryDataPointValueAtQuantile() + orig[2] = NewSummaryDataPointValueAtQuantile() + orig[3] = GenTestSummaryDataPointValueAtQuantile() + orig[4] = NewSummaryDataPointValueAtQuantile() + return orig +} + +func GenTestSummaryDataPointValueAtQuantileSlice() []SummaryDataPointValueAtQuantile { + orig := make([]SummaryDataPointValueAtQuantile, 5) + orig[1] = *GenTestSummaryDataPointValueAtQuantile() + orig[3] = *GenTestSummaryDataPointValueAtQuantile() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go new file mode 100644 index 00000000000..08c350ffef6 --- /dev/null 
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go
@@ -0,0 +1,294 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+	"go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type TCPAddr struct {
+	Zone string
+	IP []byte
+	Port int64
+}
+
+var (
+	protoPoolTCPAddr = sync.Pool{
+		New: func() any {
+			return &TCPAddr{}
+		},
+	}
+)
+
+func NewTCPAddr() *TCPAddr {
+	if !UseProtoPooling.IsEnabled() {
+		return &TCPAddr{}
+	}
+	return protoPoolTCPAddr.Get().(*TCPAddr)
+}
+
+func DeleteTCPAddr(orig *TCPAddr, nullable bool) {
+	if orig == nil {
+		return
+	}
+
+	if !UseProtoPooling.IsEnabled() {
+		orig.Reset()
+		return
+	}
+
+	orig.Reset()
+	if nullable {
+		protoPoolTCPAddr.Put(orig)
+	}
+}
+
+func CopyTCPAddr(dest, src *TCPAddr) *TCPAddr {
+	// If copying to same object, just return.
+	if src == dest {
+		return dest
+	}
+
+	if src == nil {
+		return nil
+	}
+
+	if dest == nil {
+		dest = NewTCPAddr()
+	}
+	dest.IP = src.IP
+	dest.Port = src.Port
+	dest.Zone = src.Zone
+
+	return dest
+}
+
+func CopyTCPAddrSlice(dest, src []TCPAddr) []TCPAddr {
+	var newDest []TCPAddr
+	if cap(dest) < len(src) {
+		newDest = make([]TCPAddr, len(src))
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteTCPAddr(&dest[i], false)
+		}
+	}
+	for i := range src {
+		CopyTCPAddr(&newDest[i], &src[i])
+	}
+	return newDest
+}
+
+func CopyTCPAddrPtrSlice(dest, src []*TCPAddr) []*TCPAddr {
+	var newDest []*TCPAddr
+	if cap(dest) < len(src) {
+		newDest = make([]*TCPAddr, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewTCPAddr()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteTCPAddr(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewTCPAddr()
+		}
+	}
+	for i := range src {
+		CopyTCPAddr(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *TCPAddr) Reset() {
+	*orig = TCPAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TCPAddr) MarshalJSON(dest *json.Stream) {
+	dest.WriteObjectStart()
+
+	if len(orig.IP) > 0 {
+		dest.WriteObjectField("iP")
+		dest.WriteBytes(orig.IP)
+	}
+	if orig.Port != int64(0) {
+		dest.WriteObjectField("port")
+		dest.WriteInt64(orig.Port)
+	}
+	if orig.Zone != "" {
+		dest.WriteObjectField("zone")
+		dest.WriteString(orig.Zone)
+	}
+	dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
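+// Unknown fields are skipped rather than rejected, which keeps decoding tolerant of newer payloads.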
+func (orig *TCPAddr) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "iP": + orig.IP = iter.ReadBytes() + case "port": + orig.Port = iter.ReadInt64() + case "zone": + orig.Zone = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *TCPAddr) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.IP) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.Port != int64(0) { + n += 1 + proto.Sov(uint64(orig.Port)) + } + + l = len(orig.Zone) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *TCPAddr) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.IP) + if l > 0 { + pos -= l + copy(buf[pos:], orig.IP) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + if orig.Port != int64(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.Port)) + pos-- + buf[pos] = 0x10 + } + l = len(orig.Zone) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Zone) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + return len(buf) - pos +} + +func (orig *TCPAddr) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + if length != 0 { + orig.IP = make([]byte, length) + copy(orig.IP, buf[startPos:pos]) + } + + case 2: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.Port = int64(num) + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Zone = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestTCPAddr() *TCPAddr { + orig := NewTCPAddr() + orig.IP = []byte{1, 2, 3} + orig.Port = int64(13) + orig.Zone = "test_zone" + return orig +} + +func GenTestTCPAddrPtrSlice() []*TCPAddr { + orig := make([]*TCPAddr, 5) + orig[0] = NewTCPAddr() + orig[1] = GenTestTCPAddr() + orig[2] = NewTCPAddr() + orig[3] = GenTestTCPAddr() + orig[4] = NewTCPAddr() + return orig +} + +func GenTestTCPAddrSlice() []TCPAddr { + orig := make([]TCPAddr, 5) + orig[1] = *GenTestTCPAddr() + orig[3] = *GenTestTCPAddr() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go new file mode 100644 index 00000000000..2b54b6247a5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go @@ -0,0 +1,245 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. 
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+	"go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// TracesData represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do not
+// implement the OTLP protocol.
+type TracesData struct {
+	ResourceSpans []*ResourceSpans
+}
+
+var (
+	protoPoolTracesData = sync.Pool{
+		New: func() any {
+			return &TracesData{}
+		},
+	}
+)
+
+func NewTracesData() *TracesData {
+	if !UseProtoPooling.IsEnabled() {
+		return &TracesData{}
+	}
+	return protoPoolTracesData.Get().(*TracesData)
+}
+
+func DeleteTracesData(orig *TracesData, nullable bool) {
+	if orig == nil {
+		return
+	}
+
+	if !UseProtoPooling.IsEnabled() {
+		orig.Reset()
+		return
+	}
+	for i := range orig.ResourceSpans {
+		DeleteResourceSpans(orig.ResourceSpans[i], true)
+	}
+	orig.Reset()
+	if nullable {
+		protoPoolTracesData.Put(orig)
+	}
+}
+
+func CopyTracesData(dest, src *TracesData) *TracesData {
+	// If copying to same object, just return.
+	if src == dest {
+		return dest
+	}
+
+	if src == nil {
+		return nil
+	}
+
+	if dest == nil {
+		dest = NewTracesData()
+	}
+	dest.ResourceSpans = CopyResourceSpansPtrSlice(dest.ResourceSpans, src.ResourceSpans)
+
+	return dest
+}
+
+func CopyTracesDataSlice(dest, src []TracesData) []TracesData {
+	var newDest []TracesData
+	if cap(dest) < len(src) {
+		newDest = make([]TracesData, len(src))
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteTracesData(&dest[i], false)
+		}
+	}
+	for i := range src {
+		CopyTracesData(&newDest[i], &src[i])
+	}
+	return newDest
+}
+
+func CopyTracesDataPtrSlice(dest, src []*TracesData) []*TracesData {
+	var newDest []*TracesData
+	if cap(dest) < len(src) {
+		newDest = make([]*TracesData, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewTracesData()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteTracesData(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewTracesData()
+		}
+	}
+	for i := range src {
+		CopyTracesData(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *TracesData) Reset() {
+	*orig = TracesData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TracesData) MarshalJSON(dest *json.Stream) {
+	dest.WriteObjectStart()
+	if len(orig.ResourceSpans) > 0 {
+		dest.WriteObjectField("resourceSpans")
+		dest.WriteArrayStart()
+		orig.ResourceSpans[0].MarshalJSON(dest)
+		for i := 1; i < len(orig.ResourceSpans); i++ {
+			dest.WriteMore()
+			orig.ResourceSpans[i].MarshalJSON(dest)
+		}
+		dest.WriteArrayEnd()
+	}
+	dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
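+// Both lowerCamelCase ("resourceSpans") and snake_case ("resource_spans") keys are accepted.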
+func (orig *TracesData) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "resourceSpans", "resource_spans": + for iter.ReadArray() { + orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans()) + orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalJSON(iter) + } + + default: + iter.Skip() + } + } +} + +func (orig *TracesData) SizeProto() int { + var n int + var l int + _ = l + for i := range orig.ResourceSpans { + l = orig.ResourceSpans[i].SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *TracesData) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + for i := len(orig.ResourceSpans) - 1; i >= 0; i-- { + l = orig.ResourceSpans[i].MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + return len(buf) - pos +} + +func (orig *TracesData) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans()) + err = orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestTracesData() *TracesData { + orig := NewTracesData() + orig.ResourceSpans = []*ResourceSpans{{}, GenTestResourceSpans()} + return orig +} + +func GenTestTracesDataPtrSlice() []*TracesData { + orig := make([]*TracesData, 5) + orig[0] = NewTracesData() + orig[1] = GenTestTracesData() + orig[2] = NewTracesData() + orig[3] = GenTestTracesData() + orig[4] = NewTracesData() + return orig +} + +func GenTestTracesDataSlice() []TracesData { + orig := make([]TracesData, 5) + orig[1] = *GenTestTracesData() + orig[3] = *GenTestTracesData() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go new file mode 100644 index 00000000000..b8bb39687fd --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go @@ -0,0 +1,299 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
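+// This file defines TracesRequest, a request envelope that carries TracesData together with an optional RequestContext and a format version.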
+
+package internal
+
+import (
+	"encoding/binary"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+	"go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type TracesRequest struct {
+	RequestContext *RequestContext
+	TracesData TracesData
+	FormatVersion uint32
+}
+
+var (
+	protoPoolTracesRequest = sync.Pool{
+		New: func() any {
+			return &TracesRequest{}
+		},
+	}
+)
+
+func NewTracesRequest() *TracesRequest {
+	if !UseProtoPooling.IsEnabled() {
+		return &TracesRequest{}
+	}
+	return protoPoolTracesRequest.Get().(*TracesRequest)
+}
+
+func DeleteTracesRequest(orig *TracesRequest, nullable bool) {
+	if orig == nil {
+		return
+	}
+
+	if !UseProtoPooling.IsEnabled() {
+		orig.Reset()
+		return
+	}
+	DeleteRequestContext(orig.RequestContext, true)
+	DeleteTracesData(&orig.TracesData, false)
+
+	orig.Reset()
+	if nullable {
+		protoPoolTracesRequest.Put(orig)
+	}
+}
+
+func CopyTracesRequest(dest, src *TracesRequest) *TracesRequest {
+	// If copying to same object, just return.
+	if src == dest {
+		return dest
+	}
+
+	if src == nil {
+		return nil
+	}
+
+	if dest == nil {
+		dest = NewTracesRequest()
+	}
+	dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+	CopyTracesData(&dest.TracesData, &src.TracesData)
+
+	dest.FormatVersion = src.FormatVersion
+
+	return dest
+}
+
+func CopyTracesRequestSlice(dest, src []TracesRequest) []TracesRequest {
+	var newDest []TracesRequest
+	if cap(dest) < len(src) {
+		newDest = make([]TracesRequest, len(src))
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteTracesRequest(&dest[i], false)
+		}
+	}
+	for i := range src {
+		CopyTracesRequest(&newDest[i], &src[i])
+	}
+	return newDest
+}
+
+func CopyTracesRequestPtrSlice(dest, src []*TracesRequest) []*TracesRequest {
+	var newDest []*TracesRequest
+	if cap(dest) < len(src) {
+		newDest = make([]*TracesRequest, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewTracesRequest()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteTracesRequest(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewTracesRequest()
+		}
+	}
+	for i := range src {
+		CopyTracesRequest(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *TracesRequest) Reset() {
+	*orig = TracesRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TracesRequest) MarshalJSON(dest *json.Stream) {
+	dest.WriteObjectStart()
+	if orig.RequestContext != nil {
+		dest.WriteObjectField("requestContext")
+		orig.RequestContext.MarshalJSON(dest)
+	}
+	dest.WriteObjectField("tracesData")
+	orig.TracesData.MarshalJSON(dest)
+	if orig.FormatVersion != uint32(0) {
+		dest.WriteObjectField("formatVersion")
+		dest.WriteUint32(orig.FormatVersion)
+	}
+	dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
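+// TracesData is decoded in place; RequestContext is allocated only when the key is present.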
+func (orig *TracesRequest) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "requestContext", "request_context": + orig.RequestContext = NewRequestContext() + orig.RequestContext.UnmarshalJSON(iter) + case "tracesData", "traces_data": + + orig.TracesData.UnmarshalJSON(iter) + case "formatVersion", "format_version": + orig.FormatVersion = iter.ReadUint32() + default: + iter.Skip() + } + } +} + +func (orig *TracesRequest) SizeProto() int { + var n int + var l int + _ = l + if orig.RequestContext != nil { + l = orig.RequestContext.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + } + l = orig.TracesData.SizeProto() + n += 1 + proto.Sov(uint64(l)) + l + if orig.FormatVersion != uint32(0) { + n += 5 + } + return n +} + +func (orig *TracesRequest) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.RequestContext != nil { + l = orig.RequestContext.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + l = orig.TracesData.MarshalProto(buf[:pos]) + pos -= l + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + + if orig.FormatVersion != uint32(0) { + pos -= 4 + binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion)) + pos-- + buf[pos] = 0xd + } + return len(buf) - pos +} + +func (orig *TracesRequest) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + orig.RequestContext = NewRequestContext() + err = orig.RequestContext.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field TracesData", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + + err = orig.TracesData.UnmarshalProto(buf[startPos:pos]) + if err != nil { + return err + } + + case 1: + if wireType != proto.WireTypeI32 { + return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType) + } + var num uint32 + num, pos, err = proto.ConsumeI32(buf, pos) + if err != nil { + return err + } + + orig.FormatVersion = uint32(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestTracesRequest() *TracesRequest { + orig := NewTracesRequest() + orig.RequestContext = GenTestRequestContext() + orig.TracesData = *GenTestTracesData() + orig.FormatVersion = uint32(13) + return orig +} + +func GenTestTracesRequestPtrSlice() []*TracesRequest { + orig := make([]*TracesRequest, 5) + orig[0] = NewTracesRequest() + orig[1] = GenTestTracesRequest() + orig[2] = NewTracesRequest() + orig[3] = GenTestTracesRequest() + orig[4] = NewTracesRequest() + return orig +} + +func GenTestTracesRequestSlice() []TracesRequest { + orig := make([]TracesRequest, 5) + orig[1] = *GenTestTracesRequest() + orig[3] = *GenTestTracesRequest() + return orig +} diff 
--git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go
new file mode 100644
index 00000000000..dd2883de59c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go
@@ -0,0 +1,294 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+	"go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type UDPAddr struct {
+	Zone string
+	IP []byte
+	Port int64
+}
+
+var (
+	protoPoolUDPAddr = sync.Pool{
+		New: func() any {
+			return &UDPAddr{}
+		},
+	}
+)
+
+func NewUDPAddr() *UDPAddr {
+	if !UseProtoPooling.IsEnabled() {
+		return &UDPAddr{}
+	}
+	return protoPoolUDPAddr.Get().(*UDPAddr)
+}
+
+func DeleteUDPAddr(orig *UDPAddr, nullable bool) {
+	if orig == nil {
+		return
+	}
+
+	if !UseProtoPooling.IsEnabled() {
+		orig.Reset()
+		return
+	}
+
+	orig.Reset()
+	if nullable {
+		protoPoolUDPAddr.Put(orig)
+	}
+}
+
+func CopyUDPAddr(dest, src *UDPAddr) *UDPAddr {
+	// If copying to same object, just return.
+	if src == dest {
+		return dest
+	}
+
+	if src == nil {
+		return nil
+	}
+
+	if dest == nil {
+		dest = NewUDPAddr()
+	}
+	dest.IP = src.IP
+	dest.Port = src.Port
+	dest.Zone = src.Zone
+
+	return dest
+}
+
+func CopyUDPAddrSlice(dest, src []UDPAddr) []UDPAddr {
+	var newDest []UDPAddr
+	if cap(dest) < len(src) {
+		newDest = make([]UDPAddr, len(src))
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteUDPAddr(&dest[i], false)
+		}
+	}
+	for i := range src {
+		CopyUDPAddr(&newDest[i], &src[i])
+	}
+	return newDest
+}
+
+func CopyUDPAddrPtrSlice(dest, src []*UDPAddr) []*UDPAddr {
+	var newDest []*UDPAddr
+	if cap(dest) < len(src) {
+		newDest = make([]*UDPAddr, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewUDPAddr()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteUDPAddr(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewUDPAddr()
+		}
+	}
+	for i := range src {
+		CopyUDPAddr(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *UDPAddr) Reset() {
+	*orig = UDPAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *UDPAddr) MarshalJSON(dest *json.Stream) {
+	dest.WriteObjectStart()
+
+	if len(orig.IP) > 0 {
+		dest.WriteObjectField("iP")
+		dest.WriteBytes(orig.IP)
+	}
+	if orig.Port != int64(0) {
+		dest.WriteObjectField("port")
+		dest.WriteInt64(orig.Port)
+	}
+	if orig.Zone != "" {
+		dest.WriteObjectField("zone")
+		dest.WriteString(orig.Zone)
+	}
+	dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
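+// The "iP" key is the generator's lowerCamelCase rendering of the IP field.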
+func (orig *UDPAddr) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "iP": + orig.IP = iter.ReadBytes() + case "port": + orig.Port = iter.ReadInt64() + case "zone": + orig.Zone = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *UDPAddr) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.IP) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + if orig.Port != int64(0) { + n += 1 + proto.Sov(uint64(orig.Port)) + } + + l = len(orig.Zone) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *UDPAddr) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.IP) + if l > 0 { + pos -= l + copy(buf[pos:], orig.IP) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + if orig.Port != int64(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.Port)) + pos-- + buf[pos] = 0x10 + } + l = len(orig.Zone) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Zone) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x1a + } + return len(buf) - pos +} + +func (orig *UDPAddr) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + if length != 0 { + orig.IP = make([]byte, length) + copy(orig.IP, buf[startPos:pos]) + } + + case 2: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.Port = int64(num) + + case 3: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Zone = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestUDPAddr() *UDPAddr { + orig := NewUDPAddr() + orig.IP = []byte{1, 2, 3} + orig.Port = int64(13) + orig.Zone = "test_zone" + return orig +} + +func GenTestUDPAddrPtrSlice() []*UDPAddr { + orig := make([]*UDPAddr, 5) + orig[0] = NewUDPAddr() + orig[1] = GenTestUDPAddr() + orig[2] = NewUDPAddr() + orig[3] = GenTestUDPAddr() + orig[4] = NewUDPAddr() + return orig +} + +func GenTestUDPAddrSlice() []UDPAddr { + orig := make([]UDPAddr, 5) + orig[1] = *GenTestUDPAddr() + orig[3] = *GenTestUDPAddr() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go new file mode 100644 index 00000000000..be00bfbd3c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go @@ -0,0 +1,262 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. 
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+	"go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type UnixAddr struct {
+	Name string
+	Net string
+}
+
+var (
+	protoPoolUnixAddr = sync.Pool{
+		New: func() any {
+			return &UnixAddr{}
+		},
+	}
+)
+
+func NewUnixAddr() *UnixAddr {
+	if !UseProtoPooling.IsEnabled() {
+		return &UnixAddr{}
+	}
+	return protoPoolUnixAddr.Get().(*UnixAddr)
+}
+
+func DeleteUnixAddr(orig *UnixAddr, nullable bool) {
+	if orig == nil {
+		return
+	}
+
+	if !UseProtoPooling.IsEnabled() {
+		orig.Reset()
+		return
+	}
+
+	orig.Reset()
+	if nullable {
+		protoPoolUnixAddr.Put(orig)
+	}
+}
+
+func CopyUnixAddr(dest, src *UnixAddr) *UnixAddr {
+	// If copying to same object, just return.
+	if src == dest {
+		return dest
+	}
+
+	if src == nil {
+		return nil
+	}
+
+	if dest == nil {
+		dest = NewUnixAddr()
+	}
+	dest.Name = src.Name
+	dest.Net = src.Net
+
+	return dest
+}
+
+func CopyUnixAddrSlice(dest, src []UnixAddr) []UnixAddr {
+	var newDest []UnixAddr
+	if cap(dest) < len(src) {
+		newDest = make([]UnixAddr, len(src))
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteUnixAddr(&dest[i], false)
+		}
+	}
+	for i := range src {
+		CopyUnixAddr(&newDest[i], &src[i])
+	}
+	return newDest
+}
+
+func CopyUnixAddrPtrSlice(dest, src []*UnixAddr) []*UnixAddr {
+	var newDest []*UnixAddr
+	if cap(dest) < len(src) {
+		newDest = make([]*UnixAddr, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewUnixAddr()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteUnixAddr(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewUnixAddr()
+		}
+	}
+	for i := range src {
+		CopyUnixAddr(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *UnixAddr) Reset() {
+	*orig = UnixAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *UnixAddr) MarshalJSON(dest *json.Stream) {
+	dest.WriteObjectStart()
+	if orig.Name != "" {
+		dest.WriteObjectField("name")
+		dest.WriteString(orig.Name)
+	}
+	if orig.Net != "" {
+		dest.WriteObjectField("net")
+		dest.WriteString(orig.Net)
+	}
+	dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
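+// Keys absent from the input leave the corresponding fields at their zero values.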
+func (orig *UnixAddr) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "name": + orig.Name = iter.ReadString() + case "net": + orig.Net = iter.ReadString() + default: + iter.Skip() + } + } +} + +func (orig *UnixAddr) SizeProto() int { + var n int + var l int + _ = l + + l = len(orig.Name) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + + l = len(orig.Net) + if l > 0 { + n += 1 + proto.Sov(uint64(l)) + l + } + return n +} + +func (orig *UnixAddr) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + l = len(orig.Name) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Name) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0xa + } + l = len(orig.Net) + if l > 0 { + pos -= l + copy(buf[pos:], orig.Net) + pos = proto.EncodeVarint(buf, pos, uint64(l)) + pos-- + buf[pos] = 0x12 + } + return len(buf) - pos +} + +func (orig *UnixAddr) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Name = string(buf[startPos:pos]) + + case 2: + if wireType != proto.WireTypeLen { + return fmt.Errorf("proto: wrong wireType = %d for field Net", wireType) + } + var length int + length, pos, err = proto.ConsumeLen(buf, pos) + if err != nil { + return err + } + startPos := pos - length + orig.Net = string(buf[startPos:pos]) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestUnixAddr() *UnixAddr { + orig := NewUnixAddr() + orig.Name = "test_name" + orig.Net = "test_net" + return orig +} + +func GenTestUnixAddrPtrSlice() []*UnixAddr { + orig := make([]*UnixAddr, 5) + orig[0] = NewUnixAddr() + orig[1] = GenTestUnixAddr() + orig[2] = NewUnixAddr() + orig[3] = GenTestUnixAddr() + orig[4] = NewUnixAddr() + return orig +} + +func GenTestUnixAddrSlice() []UnixAddr { + orig := make([]UnixAddr, 5) + orig[1] = *GenTestUnixAddr() + orig[3] = *GenTestUnixAddr() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go new file mode 100644 index 00000000000..eccd89e6ef8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go @@ -0,0 +1,251 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + "fmt" + "sync" + + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/proto" +) + +// ValueType describes the type and units of a value. 
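+// TypeStrindex and UnitStrindex are indexes into the enclosing profile's string table rather than inline strings.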
+type ValueType struct {
+	TypeStrindex int32
+	UnitStrindex int32
+}
+
+var (
+	protoPoolValueType = sync.Pool{
+		New: func() any {
+			return &ValueType{}
+		},
+	}
+)
+
+func NewValueType() *ValueType {
+	if !UseProtoPooling.IsEnabled() {
+		return &ValueType{}
+	}
+	return protoPoolValueType.Get().(*ValueType)
+}
+
+func DeleteValueType(orig *ValueType, nullable bool) {
+	if orig == nil {
+		return
+	}
+
+	if !UseProtoPooling.IsEnabled() {
+		orig.Reset()
+		return
+	}
+
+	orig.Reset()
+	if nullable {
+		protoPoolValueType.Put(orig)
+	}
+}
+
+func CopyValueType(dest, src *ValueType) *ValueType {
+	// If copying to same object, just return.
+	if src == dest {
+		return dest
+	}
+
+	if src == nil {
+		return nil
+	}
+
+	if dest == nil {
+		dest = NewValueType()
+	}
+	dest.TypeStrindex = src.TypeStrindex
+	dest.UnitStrindex = src.UnitStrindex
+
+	return dest
+}
+
+func CopyValueTypeSlice(dest, src []ValueType) []ValueType {
+	var newDest []ValueType
+	if cap(dest) < len(src) {
+		newDest = make([]ValueType, len(src))
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteValueType(&dest[i], false)
+		}
+	}
+	for i := range src {
+		CopyValueType(&newDest[i], &src[i])
+	}
+	return newDest
+}
+
+func CopyValueTypePtrSlice(dest, src []*ValueType) []*ValueType {
+	var newDest []*ValueType
+	if cap(dest) < len(src) {
+		newDest = make([]*ValueType, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewValueType()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteValueType(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewValueType()
+		}
+	}
+	for i := range src {
+		CopyValueType(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *ValueType) Reset() {
+	*orig = ValueType{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ValueType) MarshalJSON(dest *json.Stream) {
+	dest.WriteObjectStart()
+	if orig.TypeStrindex != int32(0) {
+		dest.WriteObjectField("typeStrindex")
+		dest.WriteInt32(orig.TypeStrindex)
+	}
+	if orig.UnitStrindex != int32(0) {
+		dest.WriteObjectField("unitStrindex")
+		dest.WriteInt32(orig.UnitStrindex)
+	}
+	dest.WriteObjectEnd()
+}
+
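In SizeProto above, a non-zero varint field costs one key byte plus proto.Sov(value) payload bytes, Sov being the standard ceil(bits/7) size-of-varint. A runnable sketch of that arithmetic, using a local sov helper rather than the internal proto package:

package main

import "fmt"

// sov is the usual size-of-varint: the number of 7-bit groups needed.
func sov(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	for _, v := range []uint64{0, 127, 128, 300, 1 << 21} {
		fmt.Printf("value %7d -> key(1) + varint(%d) = %d bytes\n", v, sov(v), 1+sov(v))
	}
}

+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.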
+func (orig *ValueType) UnmarshalJSON(iter *json.Iterator) { + for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { + switch f { + case "typeStrindex", "type_strindex": + orig.TypeStrindex = iter.ReadInt32() + case "unitStrindex", "unit_strindex": + orig.UnitStrindex = iter.ReadInt32() + default: + iter.Skip() + } + } +} + +func (orig *ValueType) SizeProto() int { + var n int + var l int + _ = l + if orig.TypeStrindex != int32(0) { + n += 1 + proto.Sov(uint64(orig.TypeStrindex)) + } + if orig.UnitStrindex != int32(0) { + n += 1 + proto.Sov(uint64(orig.UnitStrindex)) + } + return n +} + +func (orig *ValueType) MarshalProto(buf []byte) int { + pos := len(buf) + var l int + _ = l + if orig.TypeStrindex != int32(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.TypeStrindex)) + pos-- + buf[pos] = 0x8 + } + if orig.UnitStrindex != int32(0) { + pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex)) + pos-- + buf[pos] = 0x10 + } + return len(buf) - pos +} + +func (orig *ValueType) UnmarshalProto(buf []byte) error { + var err error + var fieldNum int32 + var wireType proto.WireType + + l := len(buf) + pos := 0 + for pos < l { + // If in a group parsing, move to the next tag. + fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) + if err != nil { + return err + } + switch fieldNum { + + case 1: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.TypeStrindex = int32(num) + + case 2: + if wireType != proto.WireTypeVarint { + return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) + } + var num uint64 + num, pos, err = proto.ConsumeVarint(buf, pos) + if err != nil { + return err + } + orig.UnitStrindex = int32(num) + default: + pos, err = proto.ConsumeUnknown(buf, pos, wireType) + if err != nil { + return err + } + } + } + return nil +} + +func GenTestValueType() *ValueType { + orig := NewValueType() + orig.TypeStrindex = int32(13) + orig.UnitStrindex = int32(13) + return orig +} + +func GenTestValueTypePtrSlice() []*ValueType { + orig := make([]*ValueType, 5) + orig[0] = NewValueType() + orig[1] = GenTestValueType() + orig[2] = NewValueType() + orig[3] = GenTestValueType() + orig[4] = NewValueType() + return orig +} + +func GenTestValueTypeSlice() []ValueType { + orig := make([]ValueType, 5) + orig[1] = *GenTestValueType() + orig[3] = *GenTestValueType() + return orig +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalue.go deleted file mode 100644 index 013192398cb..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalue.go +++ /dev/null @@ -1,596 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
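The deleted wrapper below managed the AnyValue oneof: every operation type-switches on the wrapper interface and rebuilds the concrete variant, pooled when the feature gate is on. A small stand-in for the copy half of that pattern, with hypothetical types:

package main

import "fmt"

// value mimics the generated oneof wrapper interface; the variants below
// are stand-ins for AnyValue_StringValue, AnyValue_IntValue, and so on.
type value interface{ isValue() }

type stringValue struct{ S string }
type intValue struct{ I int64 }

func (stringValue) isValue() {}
func (intValue) isValue()    {}

// copyValue type-switches on the variant and rebuilds it, the same shape
// as the deleted CopyOrigAnyValue below.
func copyValue(src value) value {
	switch t := src.(type) {
	case stringValue:
		return stringValue{S: t.S}
	case intValue:
		return intValue{I: t.I}
	default:
		return nil // unset variant
	}
}

func main() {
	var v value = stringValue{S: "hello"}
	fmt.Printf("%#v\n", copyValue(v))
}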
- -package internal - -import ( - "encoding/binary" - "fmt" - "math" - "sync" - - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolAnyValue = sync.Pool{ - New: func() any { - return &otlpcommon.AnyValue{} - }, - } - - ProtoPoolAnyValue_StringValue = sync.Pool{ - New: func() any { - return &otlpcommon.AnyValue_StringValue{} - }, - } - - ProtoPoolAnyValue_BoolValue = sync.Pool{ - New: func() any { - return &otlpcommon.AnyValue_BoolValue{} - }, - } - - ProtoPoolAnyValue_IntValue = sync.Pool{ - New: func() any { - return &otlpcommon.AnyValue_IntValue{} - }, - } - - ProtoPoolAnyValue_DoubleValue = sync.Pool{ - New: func() any { - return &otlpcommon.AnyValue_DoubleValue{} - }, - } - - ProtoPoolAnyValue_ArrayValue = sync.Pool{ - New: func() any { - return &otlpcommon.AnyValue_ArrayValue{} - }, - } - - ProtoPoolAnyValue_KvlistValue = sync.Pool{ - New: func() any { - return &otlpcommon.AnyValue_KvlistValue{} - }, - } - - ProtoPoolAnyValue_BytesValue = sync.Pool{ - New: func() any { - return &otlpcommon.AnyValue_BytesValue{} - }, - } -) - -func NewOrigAnyValue() *otlpcommon.AnyValue { - if !UseProtoPooling.IsEnabled() { - return &otlpcommon.AnyValue{} - } - return protoPoolAnyValue.Get().(*otlpcommon.AnyValue) -} - -func DeleteOrigAnyValue(orig *otlpcommon.AnyValue, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - switch ov := orig.Value.(type) { - case *otlpcommon.AnyValue_StringValue: - if UseProtoPooling.IsEnabled() { - ov.StringValue = "" - ProtoPoolAnyValue_StringValue.Put(ov) - } - case *otlpcommon.AnyValue_BoolValue: - if UseProtoPooling.IsEnabled() { - ov.BoolValue = false - ProtoPoolAnyValue_BoolValue.Put(ov) - } - case *otlpcommon.AnyValue_IntValue: - if UseProtoPooling.IsEnabled() { - ov.IntValue = int64(0) - ProtoPoolAnyValue_IntValue.Put(ov) - } - case *otlpcommon.AnyValue_DoubleValue: - if UseProtoPooling.IsEnabled() { - ov.DoubleValue = float64(0) - ProtoPoolAnyValue_DoubleValue.Put(ov) - } - case *otlpcommon.AnyValue_ArrayValue: - DeleteOrigArrayValue(ov.ArrayValue, true) - ov.ArrayValue = nil - ProtoPoolAnyValue_ArrayValue.Put(ov) - case *otlpcommon.AnyValue_KvlistValue: - DeleteOrigKeyValueList(ov.KvlistValue, true) - ov.KvlistValue = nil - ProtoPoolAnyValue_KvlistValue.Put(ov) - case *otlpcommon.AnyValue_BytesValue: - if UseProtoPooling.IsEnabled() { - ov.BytesValue = nil - ProtoPoolAnyValue_BytesValue.Put(ov) - } - - } - - orig.Reset() - if nullable { - protoPoolAnyValue.Put(orig) - } -} - -func CopyOrigAnyValue(dest, src *otlpcommon.AnyValue) { - // If copying to same object, just return. 
- if src == dest { - return - } - switch t := src.Value.(type) { - case *otlpcommon.AnyValue_StringValue: - var ov *otlpcommon.AnyValue_StringValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_StringValue{} - } else { - ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue) - } - ov.StringValue = t.StringValue - dest.Value = ov - case *otlpcommon.AnyValue_BoolValue: - var ov *otlpcommon.AnyValue_BoolValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_BoolValue{} - } else { - ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue) - } - ov.BoolValue = t.BoolValue - dest.Value = ov - case *otlpcommon.AnyValue_IntValue: - var ov *otlpcommon.AnyValue_IntValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_IntValue{} - } else { - ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue) - } - ov.IntValue = t.IntValue - dest.Value = ov - case *otlpcommon.AnyValue_DoubleValue: - var ov *otlpcommon.AnyValue_DoubleValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_DoubleValue{} - } else { - ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue) - } - ov.DoubleValue = t.DoubleValue - dest.Value = ov - case *otlpcommon.AnyValue_ArrayValue: - var ov *otlpcommon.AnyValue_ArrayValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_ArrayValue{} - } else { - ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue) - } - ov.ArrayValue = NewOrigArrayValue() - CopyOrigArrayValue(ov.ArrayValue, t.ArrayValue) - dest.Value = ov - case *otlpcommon.AnyValue_KvlistValue: - var ov *otlpcommon.AnyValue_KvlistValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_KvlistValue{} - } else { - ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue) - } - ov.KvlistValue = NewOrigKeyValueList() - CopyOrigKeyValueList(ov.KvlistValue, t.KvlistValue) - dest.Value = ov - case *otlpcommon.AnyValue_BytesValue: - var ov *otlpcommon.AnyValue_BytesValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_BytesValue{} - } else { - ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue) - } - ov.BytesValue = t.BytesValue - dest.Value = ov - } -} - -func GenTestOrigAnyValue() *otlpcommon.AnyValue { - orig := NewOrigAnyValue() - orig.Value = &otlpcommon.AnyValue_BoolValue{BoolValue: true} - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
-func MarshalJSONOrigAnyValue(orig *otlpcommon.AnyValue, dest *json.Stream) { - dest.WriteObjectStart() - switch orig := orig.Value.(type) { - case *otlpcommon.AnyValue_StringValue: - dest.WriteObjectField("stringValue") - dest.WriteString(orig.StringValue) - case *otlpcommon.AnyValue_BoolValue: - dest.WriteObjectField("boolValue") - dest.WriteBool(orig.BoolValue) - case *otlpcommon.AnyValue_IntValue: - dest.WriteObjectField("intValue") - dest.WriteInt64(orig.IntValue) - case *otlpcommon.AnyValue_DoubleValue: - dest.WriteObjectField("doubleValue") - dest.WriteFloat64(orig.DoubleValue) - case *otlpcommon.AnyValue_ArrayValue: - if orig.ArrayValue != nil { - dest.WriteObjectField("arrayValue") - MarshalJSONOrigArrayValue(orig.ArrayValue, dest) - } - case *otlpcommon.AnyValue_KvlistValue: - if orig.KvlistValue != nil { - dest.WriteObjectField("kvlistValue") - MarshalJSONOrigKeyValueList(orig.KvlistValue, dest) - } - case *otlpcommon.AnyValue_BytesValue: - - dest.WriteObjectField("bytesValue") - dest.WriteBytes(orig.BytesValue) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigValue unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigAnyValue(orig *otlpcommon.AnyValue, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - - case "stringValue", "string_value": - { - var ov *otlpcommon.AnyValue_StringValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_StringValue{} - } else { - ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue) - } - ov.StringValue = iter.ReadString() - orig.Value = ov - } - - case "boolValue", "bool_value": - { - var ov *otlpcommon.AnyValue_BoolValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_BoolValue{} - } else { - ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue) - } - ov.BoolValue = iter.ReadBool() - orig.Value = ov - } - - case "intValue", "int_value": - { - var ov *otlpcommon.AnyValue_IntValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_IntValue{} - } else { - ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue) - } - ov.IntValue = iter.ReadInt64() - orig.Value = ov - } - - case "doubleValue", "double_value": - { - var ov *otlpcommon.AnyValue_DoubleValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_DoubleValue{} - } else { - ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue) - } - ov.DoubleValue = iter.ReadFloat64() - orig.Value = ov - } - - case "arrayValue", "array_value": - { - var ov *otlpcommon.AnyValue_ArrayValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_ArrayValue{} - } else { - ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue) - } - ov.ArrayValue = NewOrigArrayValue() - UnmarshalJSONOrigArrayValue(ov.ArrayValue, iter) - orig.Value = ov - } - - case "kvlistValue", "kvlist_value": - { - var ov *otlpcommon.AnyValue_KvlistValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_KvlistValue{} - } else { - ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue) - } - ov.KvlistValue = NewOrigKeyValueList() - UnmarshalJSONOrigKeyValueList(ov.KvlistValue, iter) - orig.Value = ov - } - - case "bytesValue", "bytes_value": - { - var ov *otlpcommon.AnyValue_BytesValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_BytesValue{} - } else { - ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue) - } - 
ov.BytesValue = iter.ReadBytes() - orig.Value = ov - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigAnyValue(orig *otlpcommon.AnyValue) int { - var n int - var l int - _ = l - switch orig := orig.Value.(type) { - case nil: - _ = orig - break - case *otlpcommon.AnyValue_StringValue: - l = len(orig.StringValue) - n += 1 + proto.Sov(uint64(l)) + l - case *otlpcommon.AnyValue_BoolValue: - n += 2 - case *otlpcommon.AnyValue_IntValue: - n += 1 + proto.Sov(uint64(orig.IntValue)) - case *otlpcommon.AnyValue_DoubleValue: - n += 9 - case *otlpcommon.AnyValue_ArrayValue: - l = SizeProtoOrigArrayValue(orig.ArrayValue) - n += 1 + proto.Sov(uint64(l)) + l - case *otlpcommon.AnyValue_KvlistValue: - l = SizeProtoOrigKeyValueList(orig.KvlistValue) - n += 1 + proto.Sov(uint64(l)) + l - case *otlpcommon.AnyValue_BytesValue: - l = len(orig.BytesValue) - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigAnyValue(orig *otlpcommon.AnyValue, buf []byte) int { - pos := len(buf) - var l int - _ = l - switch orig := orig.Value.(type) { - case *otlpcommon.AnyValue_StringValue: - l = len(orig.StringValue) - pos -= l - copy(buf[pos:], orig.StringValue) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - case *otlpcommon.AnyValue_BoolValue: - pos-- - if orig.BoolValue { - buf[pos] = 1 - } else { - buf[pos] = 0 - } - pos-- - buf[pos] = 0x10 - - case *otlpcommon.AnyValue_IntValue: - pos = proto.EncodeVarint(buf, pos, uint64(orig.IntValue)) - pos-- - buf[pos] = 0x18 - - case *otlpcommon.AnyValue_DoubleValue: - pos -= 8 - binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.DoubleValue)) - pos-- - buf[pos] = 0x21 - - case *otlpcommon.AnyValue_ArrayValue: - - l = MarshalProtoOrigArrayValue(orig.ArrayValue, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x2a - - case *otlpcommon.AnyValue_KvlistValue: - - l = MarshalProtoOrigKeyValueList(orig.KvlistValue, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x32 - - case *otlpcommon.AnyValue_BytesValue: - l = len(orig.BytesValue) - pos -= l - copy(buf[pos:], orig.BytesValue) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x3a - - } - return len(buf) - pos -} - -func UnmarshalProtoOrigAnyValue(orig *otlpcommon.AnyValue, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpcommon.AnyValue_StringValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_StringValue{} - } else { - ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue) - } - ov.StringValue = string(buf[startPos:pos]) - orig.Value = ov - - case 2: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - var ov *otlpcommon.AnyValue_BoolValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_BoolValue{} - } else { - ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue) - } - ov.BoolValue = num != 0 - orig.Value = ov - - case 3: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - var ov *otlpcommon.AnyValue_IntValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_IntValue{} - } else { - ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue) - } - ov.IntValue = int64(num) - orig.Value = ov - - case 4: - if wireType != proto.WireTypeI64 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeI64(buf, pos) - if err != nil { - return err - } - var ov *otlpcommon.AnyValue_DoubleValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_DoubleValue{} - } else { - ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue) - } - ov.DoubleValue = math.Float64frombits(num) - orig.Value = ov - - case 5: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpcommon.AnyValue_ArrayValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_ArrayValue{} - } else { - ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue) - } - ov.ArrayValue = NewOrigArrayValue() - err = UnmarshalProtoOrigArrayValue(ov.ArrayValue, buf[startPos:pos]) - if err != nil { - return err - } - orig.Value = ov - - case 6: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpcommon.AnyValue_KvlistValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_KvlistValue{} - } else { - ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue) - } - ov.KvlistValue = NewOrigKeyValueList() - err = UnmarshalProtoOrigKeyValueList(ov.KvlistValue, buf[startPos:pos]) - if err != nil { - return err - } - orig.Value = ov - - case 7: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType) - } - var length int - 
length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpcommon.AnyValue_BytesValue - if !UseProtoPooling.IsEnabled() { - ov = &otlpcommon.AnyValue_BytesValue{} - } else { - ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue) - } - if length != 0 { - ov.BytesValue = make([]byte, length) - copy(ov.BytesValue, buf[startPos:pos]) - } - orig.Value = ov - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go index 7381e5f7728..2fd1f11324d 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go @@ -6,53 +6,24 @@ package internal -import ( - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" -) - -type Slice struct { - orig *[]otlpcommon.AnyValue +type SliceWrapper struct { + orig *[]AnyValue state *State } -func GetOrigSlice(ms Slice) *[]otlpcommon.AnyValue { +func GetSliceOrig(ms SliceWrapper) *[]AnyValue { return ms.orig } -func GetSliceState(ms Slice) *State { +func GetSliceState(ms SliceWrapper) *State { return ms.state } -func NewSlice(orig *[]otlpcommon.AnyValue, state *State) Slice { - return Slice{orig: orig, state: state} -} - -func GenerateTestSlice() Slice { - orig := GenerateOrigTestAnyValueSlice() - return NewSlice(&orig, NewState()) -} - -func CopyOrigAnyValueSlice(dest, src []otlpcommon.AnyValue) []otlpcommon.AnyValue { - var newDest []otlpcommon.AnyValue - if cap(dest) < len(src) { - newDest = make([]otlpcommon.AnyValue, len(src)) - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigAnyValue(&dest[i], false) - } - } - for i := range src { - CopyOrigAnyValue(&newDest[i], &src[i]) - } - return newDest +func NewSliceWrapper(orig *[]AnyValue, state *State) SliceWrapper { + return SliceWrapper{orig: orig, state: state} } -func GenerateOrigTestAnyValueSlice() []otlpcommon.AnyValue { - orig := make([]otlpcommon.AnyValue, 5) - orig[1] = *GenTestOrigAnyValue() - orig[3] = *GenTestOrigAnyValue() - return orig +func GenTestSliceWrapper() SliceWrapper { + orig := GenTestAnyValueSlice() + return NewSliceWrapper(&orig, NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_arrayvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_arrayvalue.go deleted file mode 100644 index c168fe718d2..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_arrayvalue.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
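Note the marshaling direction used throughout these generated files: MarshalProto* fills the buffer back-to-front, writing the payload first, then its length varint, then the one-byte field key, so each length prefix is already known when it is emitted. A compact sketch for a single string field (field 1, wire type LEN), with a local encodeVarint standing in for proto.EncodeVarint:

package main

import "fmt"

func sov(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

// encodeVarint writes v immediately before buf[pos] and returns the new pos.
func encodeVarint(buf []byte, pos int, v uint64) int {
	pos -= sov(v)
	for i := pos; ; i++ {
		b := byte(v & 0x7f)
		v >>= 7
		if v != 0 {
			b |= 0x80
		}
		buf[i] = b
		if v == 0 {
			return pos
		}
	}
}

func main() {
	msg := "hi"
	buf := make([]byte, 16)
	pos := len(buf)
	pos -= len(msg)
	copy(buf[pos:], msg) // payload first
	pos = encodeVarint(buf, pos, uint64(len(msg))) // then its length
	pos--
	buf[pos] = 0x0a // then the key: field 1, wire type 2 (LEN)
	fmt.Printf("% x\n", buf[pos:]) // 0a 02 68 69
}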
- -package internal - -import ( - "fmt" - "sync" - - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolArrayValue = sync.Pool{ - New: func() any { - return &otlpcommon.ArrayValue{} - }, - } -) - -func NewOrigArrayValue() *otlpcommon.ArrayValue { - if !UseProtoPooling.IsEnabled() { - return &otlpcommon.ArrayValue{} - } - return protoPoolArrayValue.Get().(*otlpcommon.ArrayValue) -} - -func DeleteOrigArrayValue(orig *otlpcommon.ArrayValue, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.Values { - DeleteOrigAnyValue(&orig.Values[i], false) - } - - orig.Reset() - if nullable { - protoPoolArrayValue.Put(orig) - } -} - -func CopyOrigArrayValue(dest, src *otlpcommon.ArrayValue) { - // If copying to same object, just return. - if src == dest { - return - } - dest.Values = CopyOrigAnyValueSlice(dest.Values, src.Values) -} - -func GenTestOrigArrayValue() *otlpcommon.ArrayValue { - orig := NewOrigArrayValue() - orig.Values = GenerateOrigTestAnyValueSlice() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigArrayValue(orig *otlpcommon.ArrayValue, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.Values) > 0 { - dest.WriteObjectField("values") - dest.WriteArrayStart() - MarshalJSONOrigAnyValue(&orig.Values[0], dest) - for i := 1; i < len(orig.Values); i++ { - dest.WriteMore() - MarshalJSONOrigAnyValue(&orig.Values[i], dest) - } - dest.WriteArrayEnd() - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigArrayValue unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigArrayValue(orig *otlpcommon.ArrayValue, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "values": - for iter.ReadArray() { - orig.Values = append(orig.Values, otlpcommon.AnyValue{}) - UnmarshalJSONOrigAnyValue(&orig.Values[len(orig.Values)-1], iter) - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigArrayValue(orig *otlpcommon.ArrayValue) int { - var n int - var l int - _ = l - for i := range orig.Values { - l = SizeProtoOrigAnyValue(&orig.Values[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigArrayValue(orig *otlpcommon.ArrayValue, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.Values) - 1; i >= 0; i-- { - l = MarshalProtoOrigAnyValue(&orig.Values[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - return len(buf) - pos -} - -func UnmarshalProtoOrigArrayValue(orig *otlpcommon.ArrayValue, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Values = append(orig.Values, otlpcommon.AnyValue{}) - err = UnmarshalProtoOrigAnyValue(&orig.Values[len(orig.Values)-1], buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go index fe7e4128a70..ecdbd89cea8 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go @@ -6,32 +6,28 @@ package internal -type ByteSlice struct { +type ByteSliceWrapper struct { orig *[]byte state *State } -func GetOrigByteSlice(ms ByteSlice) *[]byte { +func GetByteSliceOrig(ms ByteSliceWrapper) *[]byte { return ms.orig } -func GetByteSliceState(ms ByteSlice) *State { +func GetByteSliceState(ms ByteSliceWrapper) *State { return ms.state } -func NewByteSlice(orig *[]byte, state *State) ByteSlice { - return ByteSlice{orig: orig, state: state} +func NewByteSliceWrapper(orig *[]byte, state *State) ByteSliceWrapper { + return ByteSliceWrapper{orig: orig, state: state} } -func GenerateTestByteSlice() ByteSlice { - orig := GenerateOrigTestByteSlice() - return NewByteSlice(&orig, NewState()) +func GenTestByteSliceWrapper() ByteSliceWrapper { + orig := []byte{1, 2, 3} + return NewByteSliceWrapper(&orig, NewState()) } -func CopyOrigByteSlice(dst, src []byte) []byte { - return append(dst[:0], src...) 
-} - -func GenerateOrigTestByteSlice() []byte { +func GenTestByteSlice() []byte { return []byte{1, 2, 3} } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go index 36b6948152a..b4e4ea12ee6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go @@ -6,271 +6,23 @@ package internal -import ( - "fmt" - "sync" - - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -type EntityRef struct { - orig *otlpcommon.EntityRef +type EntityRefWrapper struct { + orig *EntityRef state *State } -func GetOrigEntityRef(ms EntityRef) *otlpcommon.EntityRef { +func GetEntityRefOrig(ms EntityRefWrapper) *EntityRef { return ms.orig } -func GetEntityRefState(ms EntityRef) *State { +func GetEntityRefState(ms EntityRefWrapper) *State { return ms.state } -func NewEntityRef(orig *otlpcommon.EntityRef, state *State) EntityRef { - return EntityRef{orig: orig, state: state} -} - -var ( - protoPoolEntityRef = sync.Pool{ - New: func() any { - return &otlpcommon.EntityRef{} - }, - } -) - -func NewOrigEntityRef() *otlpcommon.EntityRef { - if !UseProtoPooling.IsEnabled() { - return &otlpcommon.EntityRef{} - } - return protoPoolEntityRef.Get().(*otlpcommon.EntityRef) -} - -func DeleteOrigEntityRef(orig *otlpcommon.EntityRef, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolEntityRef.Put(orig) - } -} - -func CopyOrigEntityRef(dest, src *otlpcommon.EntityRef) { - // If copying to same object, just return. - if src == dest { - return - } - dest.SchemaUrl = src.SchemaUrl - dest.Type = src.Type - dest.IdKeys = CopyOrigStringSlice(dest.IdKeys, src.IdKeys) - dest.DescriptionKeys = CopyOrigStringSlice(dest.DescriptionKeys, src.DescriptionKeys) -} - -func GenTestOrigEntityRef() *otlpcommon.EntityRef { - orig := NewOrigEntityRef() - orig.SchemaUrl = "test_schemaurl" - orig.Type = "test_type" - orig.IdKeys = GenerateOrigTestStringSlice() - orig.DescriptionKeys = GenerateOrigTestStringSlice() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigEntityRef(orig *otlpcommon.EntityRef, dest *json.Stream) { - dest.WriteObjectStart() - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - if orig.Type != "" { - dest.WriteObjectField("type") - dest.WriteString(orig.Type) - } - if len(orig.IdKeys) > 0 { - dest.WriteObjectField("idKeys") - dest.WriteArrayStart() - dest.WriteString(orig.IdKeys[0]) - for i := 1; i < len(orig.IdKeys); i++ { - dest.WriteMore() - dest.WriteString(orig.IdKeys[i]) - } - dest.WriteArrayEnd() - } - if len(orig.DescriptionKeys) > 0 { - dest.WriteObjectField("descriptionKeys") - dest.WriteArrayStart() - dest.WriteString(orig.DescriptionKeys[0]) - for i := 1; i < len(orig.DescriptionKeys); i++ { - dest.WriteMore() - dest.WriteString(orig.DescriptionKeys[i]) - } - dest.WriteArrayEnd() - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigEntityRef unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigEntityRef(orig *otlpcommon.EntityRef, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - case "type": - orig.Type = iter.ReadString() - case "idKeys", "id_keys": - for iter.ReadArray() { - orig.IdKeys = append(orig.IdKeys, iter.ReadString()) - } - - case "descriptionKeys", "description_keys": - for iter.ReadArray() { - orig.DescriptionKeys = append(orig.DescriptionKeys, iter.ReadString()) - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigEntityRef(orig *otlpcommon.EntityRef) int { - var n int - var l int - _ = l - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.Type) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - for _, s := range orig.IdKeys { - l = len(s) - n += 1 + proto.Sov(uint64(l)) + l - } - for _, s := range orig.DescriptionKeys { - l = len(s) - n += 1 + proto.Sov(uint64(l)) + l - } - return n +func NewEntityRefWrapper(orig *EntityRef, state *State) EntityRefWrapper { + return EntityRefWrapper{orig: orig, state: state} } -func MarshalProtoOrigEntityRef(orig *otlpcommon.EntityRef, buf []byte) int { - pos := len(buf) - var l int - _ = l - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - l = len(orig.Type) - if l > 0 { - pos -= l - copy(buf[pos:], orig.Type) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - for i := len(orig.IdKeys) - 1; i >= 0; i-- { - l = len(orig.IdKeys[i]) - pos -= l - copy(buf[pos:], orig.IdKeys[i]) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - for i := len(orig.DescriptionKeys) - 1; i >= 0; i-- { - l = len(orig.DescriptionKeys[i]) - pos -= l - copy(buf[pos:], orig.DescriptionKeys[i]) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x22 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigEntityRef(orig *otlpcommon.EntityRef, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Type = string(buf[startPos:pos]) - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.IdKeys = append(orig.IdKeys, string(buf[startPos:pos])) - - case 4: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.DescriptionKeys = append(orig.DescriptionKeys, string(buf[startPos:pos])) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil +func GenTestEntityRefWrapper() EntityRefWrapper { + return NewEntityRefWrapper(GenTestEntityRef(), NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go index ecd851ff6da..8e71ec1de0a 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go @@ -6,68 +6,24 @@ package internal -import ( - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" -) - -type EntityRefSlice struct { - orig *[]*otlpcommon.EntityRef +type EntityRefSliceWrapper struct { + orig *[]*EntityRef state *State } -func GetOrigEntityRefSlice(ms EntityRefSlice) *[]*otlpcommon.EntityRef { +func GetEntityRefSliceOrig(ms EntityRefSliceWrapper) *[]*EntityRef { return ms.orig } -func GetEntityRefSliceState(ms EntityRefSlice) *State { +func GetEntityRefSliceState(ms EntityRefSliceWrapper) *State { return ms.state } -func NewEntityRefSlice(orig *[]*otlpcommon.EntityRef, state *State) EntityRefSlice { - return EntityRefSlice{orig: orig, state: state} -} - -func GenerateTestEntityRefSlice() EntityRefSlice { - orig := GenerateOrigTestEntityRefSlice() - return NewEntityRefSlice(&orig, NewState()) -} - -func CopyOrigEntityRefSlice(dest, src []*otlpcommon.EntityRef) []*otlpcommon.EntityRef { - var newDest []*otlpcommon.EntityRef - if cap(dest) < len(src) { - newDest = make([]*otlpcommon.EntityRef, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigEntityRef() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). 
- for i := len(src); i < len(dest); i++ { - DeleteOrigEntityRef(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigEntityRef() - } - } - for i := range src { - CopyOrigEntityRef(newDest[i], src[i]) - } - return newDest +func NewEntityRefSliceWrapper(orig *[]*EntityRef, state *State) EntityRefSliceWrapper { + return EntityRefSliceWrapper{orig: orig, state: state} } -func GenerateOrigTestEntityRefSlice() []*otlpcommon.EntityRef { - orig := make([]*otlpcommon.EntityRef, 5) - orig[0] = NewOrigEntityRef() - orig[1] = GenTestOrigEntityRef() - orig[2] = NewOrigEntityRef() - orig[3] = GenTestOrigEntityRef() - orig[4] = NewOrigEntityRef() - return orig +func GenTestEntityRefSliceWrapper() EntityRefSliceWrapper { + orig := GenTestEntityRefPtrSlice() + return NewEntityRefSliceWrapper(&orig, NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplar.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplar.go deleted file mode 100644 index 3c3222e6db0..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplar.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "encoding/binary" - "fmt" - "math" - "sync" - - "go.opentelemetry.io/collector/pdata/internal/data" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExemplar = sync.Pool{ - New: func() any { - return &otlpmetrics.Exemplar{} - }, - } - - ProtoPoolExemplar_AsDouble = sync.Pool{ - New: func() any { - return &otlpmetrics.Exemplar_AsDouble{} - }, - } - - ProtoPoolExemplar_AsInt = sync.Pool{ - New: func() any { - return &otlpmetrics.Exemplar_AsInt{} - }, - } -) - -func NewOrigExemplar() *otlpmetrics.Exemplar { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.Exemplar{} - } - return protoPoolExemplar.Get().(*otlpmetrics.Exemplar) -} - -func DeleteOrigExemplar(orig *otlpmetrics.Exemplar, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.FilteredAttributes { - DeleteOrigKeyValue(&orig.FilteredAttributes[i], false) - } - switch ov := orig.Value.(type) { - case *otlpmetrics.Exemplar_AsDouble: - if UseProtoPooling.IsEnabled() { - ov.AsDouble = float64(0) - ProtoPoolExemplar_AsDouble.Put(ov) - } - case *otlpmetrics.Exemplar_AsInt: - if UseProtoPooling.IsEnabled() { - ov.AsInt = int64(0) - ProtoPoolExemplar_AsInt.Put(ov) - } - - } - DeleteOrigSpanID(&orig.SpanId, false) - DeleteOrigTraceID(&orig.TraceId, false) - - orig.Reset() - if nullable { - protoPoolExemplar.Put(orig) - } -} - -func CopyOrigExemplar(dest, src *otlpmetrics.Exemplar) { - // If copying to same object, just return. 
- if src == dest { - return - } - dest.FilteredAttributes = CopyOrigKeyValueSlice(dest.FilteredAttributes, src.FilteredAttributes) - dest.TimeUnixNano = src.TimeUnixNano - switch t := src.Value.(type) { - case *otlpmetrics.Exemplar_AsDouble: - var ov *otlpmetrics.Exemplar_AsDouble - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Exemplar_AsDouble{} - } else { - ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble) - } - ov.AsDouble = t.AsDouble - dest.Value = ov - case *otlpmetrics.Exemplar_AsInt: - var ov *otlpmetrics.Exemplar_AsInt - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Exemplar_AsInt{} - } else { - ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt) - } - ov.AsInt = t.AsInt - dest.Value = ov - } - dest.SpanId = src.SpanId - dest.TraceId = src.TraceId -} - -func GenTestOrigExemplar() *otlpmetrics.Exemplar { - orig := NewOrigExemplar() - orig.FilteredAttributes = GenerateOrigTestKeyValueSlice() - orig.TimeUnixNano = 1234567890 - orig.Value = &otlpmetrics.Exemplar_AsInt{AsInt: int64(13)} - orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) - orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExemplar(orig *otlpmetrics.Exemplar, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.FilteredAttributes) > 0 { - dest.WriteObjectField("filteredAttributes") - dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.FilteredAttributes[0], dest) - for i := 1; i < len(orig.FilteredAttributes); i++ { - dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.FilteredAttributes[i], dest) - } - dest.WriteArrayEnd() - } - if orig.TimeUnixNano != uint64(0) { - dest.WriteObjectField("timeUnixNano") - dest.WriteUint64(orig.TimeUnixNano) - } - switch orig := orig.Value.(type) { - case *otlpmetrics.Exemplar_AsDouble: - dest.WriteObjectField("asDouble") - dest.WriteFloat64(orig.AsDouble) - case *otlpmetrics.Exemplar_AsInt: - dest.WriteObjectField("asInt") - dest.WriteInt64(orig.AsInt) - } - if orig.SpanId != data.SpanID([8]byte{}) { - dest.WriteObjectField("spanId") - MarshalJSONOrigSpanID(&orig.SpanId, dest) - } - if orig.TraceId != data.TraceID([16]byte{}) { - dest.WriteObjectField("traceId") - MarshalJSONOrigTraceID(&orig.TraceId, dest) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExemplar unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigExemplar(orig *otlpmetrics.Exemplar, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "filteredAttributes", "filtered_attributes": - for iter.ReadArray() { - orig.FilteredAttributes = append(orig.FilteredAttributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.FilteredAttributes[len(orig.FilteredAttributes)-1], iter) - } - - case "timeUnixNano", "time_unix_nano": - orig.TimeUnixNano = iter.ReadUint64() - - case "asDouble", "as_double": - { - var ov *otlpmetrics.Exemplar_AsDouble - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Exemplar_AsDouble{} - } else { - ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble) - } - ov.AsDouble = iter.ReadFloat64() - orig.Value = ov - } - - case "asInt", "as_int": - { - var ov *otlpmetrics.Exemplar_AsInt - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Exemplar_AsInt{} - } else { - ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt) - } - ov.AsInt = iter.ReadInt64() - orig.Value = ov - } - - case "spanId", "span_id": - UnmarshalJSONOrigSpanID(&orig.SpanId, iter) - case "traceId", "trace_id": - UnmarshalJSONOrigTraceID(&orig.TraceId, iter) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExemplar(orig *otlpmetrics.Exemplar) int { - var n int - var l int - _ = l - for i := range orig.FilteredAttributes { - l = SizeProtoOrigKeyValue(&orig.FilteredAttributes[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - if orig.TimeUnixNano != 0 { - n += 9 - } - switch orig := orig.Value.(type) { - case nil: - _ = orig - break - case *otlpmetrics.Exemplar_AsDouble: - n += 9 - case *otlpmetrics.Exemplar_AsInt: - n += 9 - } - l = SizeProtoOrigSpanID(&orig.SpanId) - n += 1 + proto.Sov(uint64(l)) + l - l = SizeProtoOrigTraceID(&orig.TraceId) - n += 1 + proto.Sov(uint64(l)) + l - return n -} - -func MarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.FilteredAttributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.FilteredAttributes[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x3a - } - if orig.TimeUnixNano != 0 { - pos -= 8 - binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano)) - pos-- - buf[pos] = 0x11 - } - switch orig := orig.Value.(type) { - case *otlpmetrics.Exemplar_AsDouble: - pos -= 8 - binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble)) - pos-- - buf[pos] = 0x19 - - case *otlpmetrics.Exemplar_AsInt: - pos -= 8 - binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt)) - pos-- - buf[pos] = 0x31 - - } - - l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x22 - - l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x2a - - return len(buf) - pos -} - -func UnmarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 7: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.FilteredAttributes = append(orig.FilteredAttributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.FilteredAttributes[len(orig.FilteredAttributes)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeI64 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeI64(buf, pos) - if err != nil { - return err - } - - orig.TimeUnixNano = uint64(num) - - case 3: - if wireType != proto.WireTypeI64 { - return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeI64(buf, pos) - if err != nil { - return err - } - var ov *otlpmetrics.Exemplar_AsDouble - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Exemplar_AsDouble{} - } else { - ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble) - } - ov.AsDouble = math.Float64frombits(num) - orig.Value = ov - - case 6: - if wireType != proto.WireTypeI64 { - return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeI64(buf, pos) - if err != nil { - return err - } - var ov *otlpmetrics.Exemplar_AsInt - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Exemplar_AsInt{} - } else { - ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt) - } - ov.AsInt = int64(num) - orig.Value = ov - - case 4: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos]) - if err != nil { - return err - } - - case 5: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplarslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplarslice.go deleted file mode 100644 index ab91c7c5931..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplarslice.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
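The deleted CopyOrigExemplarSlice below is one instance of the copy-with-reuse idiom that recurs across pdata (compare the CopyUnixAddrSlice and CopyValueTypeSlice added earlier in this diff): reuse the destination's backing array when it is large enough, reset the abandoned tail so the GC can reclaim what it references, and only otherwise reallocate. A generic stand-in under those assumptions (helper names hypothetical, Go 1.18+):

package main

import "fmt"

// copySlice reuses dest's backing array when it has enough capacity,
// resetting any tail elements left over from longer previous contents;
// otherwise it allocates fresh. reset and cp are the per-element hooks.
func copySlice[T any](dest, src []T, reset func(*T), cp func(dst, src *T)) []T {
	var newDest []T
	if cap(dest) < len(src) {
		newDest = make([]T, len(src))
	} else {
		newDest = dest[:len(src)]
		// Reset the abandoned tail so the GC can free what it references.
		for i := len(src); i < len(dest); i++ {
			reset(&dest[i])
		}
	}
	for i := range src {
		cp(&newDest[i], &src[i])
	}
	return newDest
}

func main() {
	dest := make([]string, 3, 4)
	src := []string{"a", "b"}
	out := copySlice(dest, src,
		func(s *string) { *s = "" },
		func(dst, src *string) { *dst = *src })
	fmt.Println(out, len(out), cap(out)) // [a b] 2 4
}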
- -package internal - -import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -func CopyOrigExemplarSlice(dest, src []otlpmetrics.Exemplar) []otlpmetrics.Exemplar { - var newDest []otlpmetrics.Exemplar - if cap(dest) < len(src) { - newDest = make([]otlpmetrics.Exemplar, len(src)) - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigExemplar(&dest[i], false) - } - } - for i := range src { - CopyOrigExemplar(&newDest[i], &src[i]) - } - return newDest -} - -func GenerateOrigTestExemplarSlice() []otlpmetrics.Exemplar { - orig := make([]otlpmetrics.Exemplar, 5) - orig[1] = *GenTestOrigExemplar() - orig[3] = *GenTestOrigExemplar() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogram.go deleted file mode 100644 index 1f4838d7457..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogram.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExponentialHistogram = sync.Pool{ - New: func() any { - return &otlpmetrics.ExponentialHistogram{} - }, - } -) - -func NewOrigExponentialHistogram() *otlpmetrics.ExponentialHistogram { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.ExponentialHistogram{} - } - return protoPoolExponentialHistogram.Get().(*otlpmetrics.ExponentialHistogram) -} - -func DeleteOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.DataPoints { - DeleteOrigExponentialHistogramDataPoint(orig.DataPoints[i], true) - } - - orig.Reset() - if nullable { - protoPoolExponentialHistogram.Put(orig) - } -} - -func CopyOrigExponentialHistogram(dest, src *otlpmetrics.ExponentialHistogram) { - // If copying to same object, just return. - if src == dest { - return - } - dest.DataPoints = CopyOrigExponentialHistogramDataPointSlice(dest.DataPoints, src.DataPoints) - dest.AggregationTemporality = src.AggregationTemporality -} - -func GenTestOrigExponentialHistogram() *otlpmetrics.ExponentialHistogram { - orig := NewOrigExponentialHistogram() - orig.DataPoints = GenerateOrigTestExponentialHistogramDataPointSlice() - orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1) - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
-func MarshalJSONOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.DataPoints) > 0 { - dest.WriteObjectField("dataPoints") - dest.WriteArrayStart() - MarshalJSONOrigExponentialHistogramDataPoint(orig.DataPoints[0], dest) - for i := 1; i < len(orig.DataPoints); i++ { - dest.WriteMore() - MarshalJSONOrigExponentialHistogramDataPoint(orig.DataPoints[i], dest) - } - dest.WriteArrayEnd() - } - - if int32(orig.AggregationTemporality) != 0 { - dest.WriteObjectField("aggregationTemporality") - dest.WriteInt32(int32(orig.AggregationTemporality)) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExponentialHistogram unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "dataPoints", "data_points": - for iter.ReadArray() { - orig.DataPoints = append(orig.DataPoints, NewOrigExponentialHistogramDataPoint()) - UnmarshalJSONOrigExponentialHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter) - } - - case "aggregationTemporality", "aggregation_temporality": - orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value)) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram) int { - var n int - var l int - _ = l - for i := range orig.DataPoints { - l = SizeProtoOrigExponentialHistogramDataPoint(orig.DataPoints[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - if orig.AggregationTemporality != 0 { - n += 1 + proto.Sov(uint64(orig.AggregationTemporality)) - } - return n -} - -func MarshalProtoOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.DataPoints) - 1; i >= 0; i-- { - l = MarshalProtoOrigExponentialHistogramDataPoint(orig.DataPoints[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - if orig.AggregationTemporality != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality)) - pos-- - buf[pos] = 0x10 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.DataPoints = append(orig.DataPoints, NewOrigExponentialHistogramDataPoint()) - err = UnmarshalProtoOrigExponentialHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint_buckets.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint_buckets.go deleted file mode 100644 index 35d7a78ec9a..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint_buckets.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExponentialHistogramDataPoint_Buckets = sync.Pool{ - New: func() any { - return &otlpmetrics.ExponentialHistogramDataPoint_Buckets{} - }, - } -) - -func NewOrigExponentialHistogramDataPoint_Buckets() *otlpmetrics.ExponentialHistogramDataPoint_Buckets { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.ExponentialHistogramDataPoint_Buckets{} - } - return protoPoolExponentialHistogramDataPoint_Buckets.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Buckets) -} - -func DeleteOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolExponentialHistogramDataPoint_Buckets.Put(orig) - } -} - -func CopyOrigExponentialHistogramDataPoint_Buckets(dest, src *otlpmetrics.ExponentialHistogramDataPoint_Buckets) { - // If copying to same object, just return. - if src == dest { - return - } - dest.Offset = src.Offset - dest.BucketCounts = CopyOrigUint64Slice(dest.BucketCounts, src.BucketCounts) -} - -func GenTestOrigExponentialHistogramDataPoint_Buckets() *otlpmetrics.ExponentialHistogramDataPoint_Buckets { - orig := NewOrigExponentialHistogramDataPoint_Buckets() - orig.Offset = int32(13) - orig.BucketCounts = GenerateOrigTestUint64Slice() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
-func MarshalJSONOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, dest *json.Stream) { - dest.WriteObjectStart() - if orig.Offset != int32(0) { - dest.WriteObjectField("offset") - dest.WriteInt32(orig.Offset) - } - if len(orig.BucketCounts) > 0 { - dest.WriteObjectField("bucketCounts") - dest.WriteArrayStart() - dest.WriteUint64(orig.BucketCounts[0]) - for i := 1; i < len(orig.BucketCounts); i++ { - dest.WriteMore() - dest.WriteUint64(orig.BucketCounts[i]) - } - dest.WriteArrayEnd() - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExponentialHistogramDataPointBuckets unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "offset": - orig.Offset = iter.ReadInt32() - case "bucketCounts", "bucket_counts": - for iter.ReadArray() { - orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64()) - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets) int { - var n int - var l int - _ = l - if orig.Offset != 0 { - n += 1 + proto.Soz(uint64(orig.Offset)) - } - if len(orig.BucketCounts) > 0 { - l = 0 - for _, e := range orig.BucketCounts { - l += proto.Sov(uint64(e)) - } - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, buf []byte) int { - pos := len(buf) - var l int - _ = l - if orig.Offset != 0 { - pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Offset)<<1)^uint32(orig.Offset>>31))) - pos-- - buf[pos] = 0x8 - } - l = len(orig.BucketCounts) - if l > 0 { - endPos := pos - for i := l - 1; i >= 0; i-- { - pos = proto.EncodeVarint(buf, pos, uint64(orig.BucketCounts[i])) - } - pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos)) - pos-- - buf[pos] = 0x12 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.Offset = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31)) - case 2: - switch wireType { - case proto.WireTypeLen: - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var num uint64 - for startPos < pos { - num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos) - if err != nil { - return err - } - orig.BucketCounts = append(orig.BucketCounts, uint64(num)) - } - if startPos != pos { - return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos) - } - case proto.WireTypeVarint: - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - orig.BucketCounts = append(orig.BucketCounts, uint64(num)) - default: - return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapointslice.go deleted file mode 100644 index 021c0d0a267..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapointslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -func CopyOrigExponentialHistogramDataPointSlice(dest, src []*otlpmetrics.ExponentialHistogramDataPoint) []*otlpmetrics.ExponentialHistogramDataPoint { - var newDest []*otlpmetrics.ExponentialHistogramDataPoint - if cap(dest) < len(src) { - newDest = make([]*otlpmetrics.ExponentialHistogramDataPoint, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigExponentialHistogramDataPoint() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigExponentialHistogramDataPoint(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigExponentialHistogramDataPoint() - } - } - for i := range src { - CopyOrigExponentialHistogramDataPoint(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestExponentialHistogramDataPointSlice() []*otlpmetrics.ExponentialHistogramDataPoint { - orig := make([]*otlpmetrics.ExponentialHistogramDataPoint, 5) - orig[0] = NewOrigExponentialHistogramDataPoint() - orig[1] = GenTestOrigExponentialHistogramDataPoint() - orig[2] = NewOrigExponentialHistogramDataPoint() - orig[3] = GenTestOrigExponentialHistogramDataPoint() - orig[4] = NewOrigExponentialHistogramDataPoint() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogspartialsuccess.go deleted file mode 100644 index 4a55de6ded3..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogspartialsuccess.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExportLogsPartialSuccess = sync.Pool{ - New: func() any { - return &otlpcollectorlogs.ExportLogsPartialSuccess{} - }, - } -) - -func NewOrigExportLogsPartialSuccess() *otlpcollectorlogs.ExportLogsPartialSuccess { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectorlogs.ExportLogsPartialSuccess{} - } - return protoPoolExportLogsPartialSuccess.Get().(*otlpcollectorlogs.ExportLogsPartialSuccess) -} - -func DeleteOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolExportLogsPartialSuccess.Put(orig) - } -} - -func CopyOrigExportLogsPartialSuccess(dest, src *otlpcollectorlogs.ExportLogsPartialSuccess) { - // If copying to same object, just return. - if src == dest { - return - } - dest.RejectedLogRecords = src.RejectedLogRecords - dest.ErrorMessage = src.ErrorMessage -} - -func GenTestOrigExportLogsPartialSuccess() *otlpcollectorlogs.ExportLogsPartialSuccess { - orig := NewOrigExportLogsPartialSuccess() - orig.RejectedLogRecords = int64(13) - orig.ErrorMessage = "test_errormessage" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, dest *json.Stream) { - dest.WriteObjectStart() - if orig.RejectedLogRecords != int64(0) { - dest.WriteObjectField("rejectedLogRecords") - dest.WriteInt64(orig.RejectedLogRecords) - } - if orig.ErrorMessage != "" { - dest.WriteObjectField("errorMessage") - dest.WriteString(orig.ErrorMessage) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExportPartialSuccess unmarshals all properties from the current struct from the source iterator. 
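CopyOrigExponentialHistogramDataPointSlice above follows the reuse-first pattern shared by every generated slice copy: keep dest's backing array when it is large enough, release surplus elements so the pool or GC can reclaim them, and allocate only the missing tail. A generic sketch of the same logic (the type parameter and callbacks stand in for the generated per-type helpers):

package main

import "fmt"

func copyPointerSlice[T any](dest, src []*T, newT func() *T, deleteT func(*T), copyT func(dst, src *T)) []*T {
	var newDest []*T
	if cap(dest) < len(src) {
		newDest = make([]*T, len(src))
		copy(newDest, dest) // keep pointers that are already allocated
		for i := len(dest); i < len(src); i++ {
			newDest[i] = newT()
		}
	} else {
		newDest = dest[:len(src)]
		// Release surplus elements (len(src) < len(dest) <= cap(dest)).
		for i := len(src); i < len(dest); i++ {
			deleteT(dest[i])
			dest[i] = nil
		}
		// Allocate missing elements (len(dest) < len(src) <= cap(dest)).
		for i := len(dest); i < len(src); i++ {
			newDest[i] = newT()
		}
	}
	for i := range src {
		copyT(newDest[i], src[i])
	}
	return newDest
}

func main() {
	one, two := 1, 2
	src := []*int{&one, &two}
	dest := copyPointerSlice(nil, src,
		func() *int { return new(int) },
		func(*int) {},
		func(dst, src *int) { *dst = *src })
	fmt.Println(*dest[0], *dest[1]) // 1 2
}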
-func UnmarshalJSONOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "rejectedLogRecords", "rejected_log_records": - orig.RejectedLogRecords = iter.ReadInt64() - case "errorMessage", "error_message": - orig.ErrorMessage = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess) int { - var n int - var l int - _ = l - if orig.RejectedLogRecords != 0 { - n += 1 + proto.Sov(uint64(orig.RejectedLogRecords)) - } - l = len(orig.ErrorMessage) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, buf []byte) int { - pos := len(buf) - var l int - _ = l - if orig.RejectedLogRecords != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedLogRecords)) - pos-- - buf[pos] = 0x8 - } - l = len(orig.ErrorMessage) - if l > 0 { - pos -= l - copy(buf[pos:], orig.ErrorMessage) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.RejectedLogRecords = int64(num) - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ErrorMessage = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go index 090e82363ce..a529cd105bc 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go @@ -6,174 +6,23 @@ package internal -import ( - "fmt" - "sync" - - otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -type Logs struct { - orig *otlpcollectorlogs.ExportLogsServiceRequest +type LogsWrapper struct { + orig *ExportLogsServiceRequest state *State } -func GetOrigLogs(ms Logs) *otlpcollectorlogs.ExportLogsServiceRequest { +func GetLogsOrig(ms LogsWrapper) *ExportLogsServiceRequest { return ms.orig } -func GetLogsState(ms Logs) *State { +func GetLogsState(ms LogsWrapper) *State { return ms.state } -func NewLogs(orig *otlpcollectorlogs.ExportLogsServiceRequest, state *State) Logs 
{ - return Logs{orig: orig, state: state} +func NewLogsWrapper(orig *ExportLogsServiceRequest, state *State) LogsWrapper { + return LogsWrapper{orig: orig, state: state} } -var ( - protoPoolExportLogsServiceRequest = sync.Pool{ - New: func() any { - return &otlpcollectorlogs.ExportLogsServiceRequest{} - }, - } -) - -func NewOrigExportLogsServiceRequest() *otlpcollectorlogs.ExportLogsServiceRequest { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectorlogs.ExportLogsServiceRequest{} - } - return protoPoolExportLogsServiceRequest.Get().(*otlpcollectorlogs.ExportLogsServiceRequest) -} - -func DeleteOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.ResourceLogs { - DeleteOrigResourceLogs(orig.ResourceLogs[i], true) - } - - orig.Reset() - if nullable { - protoPoolExportLogsServiceRequest.Put(orig) - } -} - -func CopyOrigExportLogsServiceRequest(dest, src *otlpcollectorlogs.ExportLogsServiceRequest) { - // If copying to same object, just return. - if src == dest { - return - } - dest.ResourceLogs = CopyOrigResourceLogsSlice(dest.ResourceLogs, src.ResourceLogs) -} - -func GenTestOrigExportLogsServiceRequest() *otlpcollectorlogs.ExportLogsServiceRequest { - orig := NewOrigExportLogsServiceRequest() - orig.ResourceLogs = GenerateOrigTestResourceLogsSlice() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.ResourceLogs) > 0 { - dest.WriteObjectField("resourceLogs") - dest.WriteArrayStart() - MarshalJSONOrigResourceLogs(orig.ResourceLogs[0], dest) - for i := 1; i < len(orig.ResourceLogs); i++ { - dest.WriteMore() - MarshalJSONOrigResourceLogs(orig.ResourceLogs[i], dest) - } - dest.WriteArrayEnd() - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigLogs unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "resourceLogs", "resource_logs": - for iter.ReadArray() { - orig.ResourceLogs = append(orig.ResourceLogs, NewOrigResourceLogs()) - UnmarshalJSONOrigResourceLogs(orig.ResourceLogs[len(orig.ResourceLogs)-1], iter) - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest) int { - var n int - var l int - _ = l - for i := range orig.ResourceLogs { - l = SizeProtoOrigResourceLogs(orig.ResourceLogs[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.ResourceLogs) - 1; i >= 0; i-- { - l = MarshalProtoOrigResourceLogs(orig.ResourceLogs[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - return len(buf) - pos -} - -func UnmarshalProtoOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ResourceLogs = append(orig.ResourceLogs, NewOrigResourceLogs()) - err = UnmarshalProtoOrigResourceLogs(orig.ResourceLogs[len(orig.ResourceLogs)-1], buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil +func GenTestLogsWrapper() LogsWrapper { + return NewLogsWrapper(GenTestExportLogsServiceRequest(), NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsserviceresponse.go deleted file mode 100644 index 13bb93a0499..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsserviceresponse.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExportLogsServiceResponse = sync.Pool{ - New: func() any { - return &otlpcollectorlogs.ExportLogsServiceResponse{} - }, - } -) - -func NewOrigExportLogsServiceResponse() *otlpcollectorlogs.ExportLogsServiceResponse { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectorlogs.ExportLogsServiceResponse{} - } - return protoPoolExportLogsServiceResponse.Get().(*otlpcollectorlogs.ExportLogsServiceResponse) -} - -func DeleteOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigExportLogsPartialSuccess(&orig.PartialSuccess, false) - - orig.Reset() - if nullable { - protoPoolExportLogsServiceResponse.Put(orig) - } -} - -func CopyOrigExportLogsServiceResponse(dest, src *otlpcollectorlogs.ExportLogsServiceResponse) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigExportLogsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess) -} - -func GenTestOrigExportLogsServiceResponse() *otlpcollectorlogs.ExportLogsServiceResponse { - orig := NewOrigExportLogsServiceResponse() - orig.PartialSuccess = *GenTestOrigExportLogsPartialSuccess() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("partialSuccess") - MarshalJSONOrigExportLogsPartialSuccess(&orig.PartialSuccess, dest) - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExportResponse unmarshals all properties from the current struct from the source iterator. 
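The MarshalProto* functions above write into a presized buffer from the end toward the front: each child is marshaled first, then its varint length, then a precomputed tag byte (0xa is field 1 with wire type LEN, 0x12 is field 2, i.e. fieldNumber<<3 | wireType). A sketch of that backward style; proto.EncodeVarint's exact signature is assumed from its call sites here:

package main

import (
	"fmt"
	"math/bits"
)

// uvarintLen is the byte length of x as a varint (7 payload bits per byte).
func uvarintLen(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// putUvarintBackward reserves space just before pos, encodes x into it, and
// returns the new front position, mirroring proto.EncodeVarint(buf, pos, x).
func putUvarintBackward(buf []byte, pos int, x uint64) int {
	n := uvarintLen(x)
	pos -= n
	for p := pos; ; p++ {
		if x < 0x80 {
			buf[p] = byte(x)
			break
		}
		buf[p] = byte(x&0x7f) | 0x80
		x >>= 7
	}
	return pos
}

func main() {
	payload := []byte("abc") // stands in for an already-marshaled child message
	buf := make([]byte, 16)
	pos := len(buf)
	pos -= len(payload)
	copy(buf[pos:], payload)
	pos = putUvarintBackward(buf, pos, uint64(len(payload)))
	pos--
	buf[pos] = 0xa // tag: field 1, wire type 2 (LEN)
	fmt.Printf("% x\n", buf[pos:]) // 0a 03 61 62 63
}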
-func UnmarshalJSONOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "partialSuccess", "partial_success": - UnmarshalJSONOrigExportLogsPartialSuccess(&orig.PartialSuccess, iter) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse) int { - var n int - var l int - _ = l - l = SizeProtoOrigExportLogsPartialSuccess(&orig.PartialSuccess) - n += 1 + proto.Sov(uint64(l)) + l - return n -} - -func MarshalProtoOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigExportLogsPartialSuccess(&orig.PartialSuccess, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - return len(buf) - pos -} - -func UnmarshalProtoOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigExportLogsPartialSuccess(&orig.PartialSuccess, buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricspartialsuccess.go deleted file mode 100644 index 8bf2baeb230..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricspartialsuccess.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
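Every deleted wrapper in this diff follows the same lifecycle: NewOrigX takes from a sync.Pool when the UseProtoPooling gate is on, and DeleteOrigX resets the object and returns it to the pool only when `nullable` is true, that is, when the object is an owned pointer rather than a value embedded in its parent (PartialSuccess above is embedded, hence the `false`). A reduced sketch of the gate-plus-pool pattern (all names are stand-ins):

package main

import (
	"fmt"
	"sync"
)

// message stands in for a generated protobuf struct.
type message struct{ value int }

func (m *message) Reset() { *m = message{} }

var (
	usePooling = true // stand-in for the UseProtoPooling feature gate
	pool       = sync.Pool{New: func() any { return &message{} }}
)

func newMessage() *message {
	if !usePooling {
		return &message{}
	}
	return pool.Get().(*message)
}

// deleteMessage mirrors DeleteOrig*: always reset, but only return the
// object to the pool when it is an owned pointer (nullable), not a value
// embedded in a parent struct.
func deleteMessage(m *message, nullable bool) {
	if m == nil {
		return
	}
	m.Reset()
	if usePooling && nullable {
		pool.Put(m)
	}
}

func main() {
	m := newMessage()
	m.value = 42
	deleteMessage(m, true)
	fmt.Println(newMessage().value) // 0: either a fresh object or the reset, pooled one
}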
- -package internal - -import ( - "fmt" - "sync" - - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExportMetricsPartialSuccess = sync.Pool{ - New: func() any { - return &otlpcollectormetrics.ExportMetricsPartialSuccess{} - }, - } -) - -func NewOrigExportMetricsPartialSuccess() *otlpcollectormetrics.ExportMetricsPartialSuccess { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectormetrics.ExportMetricsPartialSuccess{} - } - return protoPoolExportMetricsPartialSuccess.Get().(*otlpcollectormetrics.ExportMetricsPartialSuccess) -} - -func DeleteOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolExportMetricsPartialSuccess.Put(orig) - } -} - -func CopyOrigExportMetricsPartialSuccess(dest, src *otlpcollectormetrics.ExportMetricsPartialSuccess) { - // If copying to same object, just return. - if src == dest { - return - } - dest.RejectedDataPoints = src.RejectedDataPoints - dest.ErrorMessage = src.ErrorMessage -} - -func GenTestOrigExportMetricsPartialSuccess() *otlpcollectormetrics.ExportMetricsPartialSuccess { - orig := NewOrigExportMetricsPartialSuccess() - orig.RejectedDataPoints = int64(13) - orig.ErrorMessage = "test_errormessage" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, dest *json.Stream) { - dest.WriteObjectStart() - if orig.RejectedDataPoints != int64(0) { - dest.WriteObjectField("rejectedDataPoints") - dest.WriteInt64(orig.RejectedDataPoints) - } - if orig.ErrorMessage != "" { - dest.WriteObjectField("errorMessage") - dest.WriteString(orig.ErrorMessage) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExportPartialSuccess unmarshals all properties from the current struct from the source iterator. 
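The MarshalJSONOrig* functions in these files implement proto3 JSON's omit-defaults rule by hand: a field is written only when it differs from its zero value, so an empty message serializes as {}. The encoding/json sketch below reproduces the observable behavior for the partial-success shape; note the real encoder uses pdata's internal json.Stream rather than encoding/json, and canonical proto3 JSON may render 64-bit integers as strings, which this sketch ignores:

package main

import (
	"encoding/json"
	"fmt"
)

// partialSuccess mirrors the two fields of ExportMetricsPartialSuccess;
// omitempty gives the same omit-defaults behavior for this shape.
type partialSuccess struct {
	RejectedDataPoints int64  `json:"rejectedDataPoints,omitempty"`
	ErrorMessage       string `json:"errorMessage,omitempty"`
}

func main() {
	empty, _ := json.Marshal(partialSuccess{})
	full, _ := json.Marshal(partialSuccess{RejectedDataPoints: 13, ErrorMessage: "test_errormessage"})
	fmt.Println(string(empty)) // {}
	fmt.Println(string(full))  // {"rejectedDataPoints":13,"errorMessage":"test_errormessage"}
}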
-func UnmarshalJSONOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "rejectedDataPoints", "rejected_data_points": - orig.RejectedDataPoints = iter.ReadInt64() - case "errorMessage", "error_message": - orig.ErrorMessage = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess) int { - var n int - var l int - _ = l - if orig.RejectedDataPoints != 0 { - n += 1 + proto.Sov(uint64(orig.RejectedDataPoints)) - } - l = len(orig.ErrorMessage) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, buf []byte) int { - pos := len(buf) - var l int - _ = l - if orig.RejectedDataPoints != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedDataPoints)) - pos-- - buf[pos] = 0x8 - } - l = len(orig.ErrorMessage) - if l > 0 { - pos -= l - copy(buf[pos:], orig.ErrorMessage) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.RejectedDataPoints = int64(num) - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ErrorMessage = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go index b3c06e55eb4..0d378e4d74d 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go @@ -6,174 +6,23 @@ package internal -import ( - "fmt" - "sync" - - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -type Metrics struct { - orig *otlpcollectormetrics.ExportMetricsServiceRequest +type MetricsWrapper struct { + orig *ExportMetricsServiceRequest state *State } -func GetOrigMetrics(ms Metrics) *otlpcollectormetrics.ExportMetricsServiceRequest { +func GetMetricsOrig(ms MetricsWrapper) *ExportMetricsServiceRequest { return ms.orig } -func GetMetricsState(ms Metrics) *State { +func GetMetricsState(ms MetricsWrapper) *State { 
return ms.state } -func NewMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest, state *State) Metrics { - return Metrics{orig: orig, state: state} +func NewMetricsWrapper(orig *ExportMetricsServiceRequest, state *State) MetricsWrapper { + return MetricsWrapper{orig: orig, state: state} } -var ( - protoPoolExportMetricsServiceRequest = sync.Pool{ - New: func() any { - return &otlpcollectormetrics.ExportMetricsServiceRequest{} - }, - } -) - -func NewOrigExportMetricsServiceRequest() *otlpcollectormetrics.ExportMetricsServiceRequest { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectormetrics.ExportMetricsServiceRequest{} - } - return protoPoolExportMetricsServiceRequest.Get().(*otlpcollectormetrics.ExportMetricsServiceRequest) -} - -func DeleteOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.ResourceMetrics { - DeleteOrigResourceMetrics(orig.ResourceMetrics[i], true) - } - - orig.Reset() - if nullable { - protoPoolExportMetricsServiceRequest.Put(orig) - } -} - -func CopyOrigExportMetricsServiceRequest(dest, src *otlpcollectormetrics.ExportMetricsServiceRequest) { - // If copying to same object, just return. - if src == dest { - return - } - dest.ResourceMetrics = CopyOrigResourceMetricsSlice(dest.ResourceMetrics, src.ResourceMetrics) -} - -func GenTestOrigExportMetricsServiceRequest() *otlpcollectormetrics.ExportMetricsServiceRequest { - orig := NewOrigExportMetricsServiceRequest() - orig.ResourceMetrics = GenerateOrigTestResourceMetricsSlice() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.ResourceMetrics) > 0 { - dest.WriteObjectField("resourceMetrics") - dest.WriteArrayStart() - MarshalJSONOrigResourceMetrics(orig.ResourceMetrics[0], dest) - for i := 1; i < len(orig.ResourceMetrics); i++ { - dest.WriteMore() - MarshalJSONOrigResourceMetrics(orig.ResourceMetrics[i], dest) - } - dest.WriteArrayEnd() - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigMetrics unmarshals all properties from the current struct from the source iterator. 
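The SizeProto* functions in these files compute message sizes bottom-up so MarshalProto* can allocate one buffer and never grow it. For a length-delimited field the cost is one tag byte plus the varint encoding of the length plus the payload, which is exactly the recurring `1 + proto.Sov(uint64(l)) + l` term; the single tag byte is valid because every field number here fits in 4 bits. A sketch of the presumed Sov definition (matching the usual gogo/protobuf one):

package main

import (
	"fmt"
	"math/bits"
)

// sov returns how many bytes x occupies as a varint (7 payload bits per byte).
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// sizeLenField reproduces the `1 + proto.Sov(uint64(l)) + l` pattern:
// tag byte + length varint + payload.
func sizeLenField(payloadLen int) int {
	return 1 + sov(uint64(payloadLen)) + payloadLen
}

func main() {
	fmt.Println(sov(0), sov(127), sov(128), sov(16383), sov(16384)) // 1 1 2 2 3
	fmt.Println(sizeLenField(3))                                    // 5: tag, length byte, 3 payload bytes
}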
-func UnmarshalJSONOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "resourceMetrics", "resource_metrics": - for iter.ReadArray() { - orig.ResourceMetrics = append(orig.ResourceMetrics, NewOrigResourceMetrics()) - UnmarshalJSONOrigResourceMetrics(orig.ResourceMetrics[len(orig.ResourceMetrics)-1], iter) - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest) int { - var n int - var l int - _ = l - for i := range orig.ResourceMetrics { - l = SizeProtoOrigResourceMetrics(orig.ResourceMetrics[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- { - l = MarshalProtoOrigResourceMetrics(orig.ResourceMetrics[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - return len(buf) - pos -} - -func UnmarshalProtoOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ResourceMetrics = append(orig.ResourceMetrics, NewOrigResourceMetrics()) - err = UnmarshalProtoOrigResourceMetrics(orig.ResourceMetrics[len(orig.ResourceMetrics)-1], buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil +func GenTestMetricsWrapper() MetricsWrapper { + return NewMetricsWrapper(GenTestExportMetricsServiceRequest(), NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsserviceresponse.go deleted file mode 100644 index bdc770f475b..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsserviceresponse.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
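UnmarshalJSONOrigExportMetricsServiceRequest above accepts both spellings of every key ("resourceMetrics" and "resource_metrics") because OTLP JSON permits the canonical lowerCamelCase name as well as the original snake_case proto field name, and unknown keys are skipped rather than rejected. A standalone encoding/json sketch of the same tolerant switch:

package main

import (
	"encoding/json"
	"fmt"
)

// acceptBothKeys decodes a document that may use either "errorMessage" or
// "error_message", mirroring the dual case labels in the generated
// UnmarshalJSONOrig* switches. Unknown keys are ignored, like iter.Skip().
func acceptBothKeys(data []byte) (string, error) {
	var fields map[string]json.RawMessage
	if err := json.Unmarshal(data, &fields); err != nil {
		return "", err
	}
	var msg string
	for k, v := range fields {
		switch k {
		case "errorMessage", "error_message":
			if err := json.Unmarshal(v, &msg); err != nil {
				return "", err
			}
		default:
			// unknown field: skip
		}
	}
	return msg, nil
}

func main() {
	a, _ := acceptBothKeys([]byte(`{"errorMessage":"boom"}`))
	b, _ := acceptBothKeys([]byte(`{"error_message":"boom","extra":1}`))
	fmt.Println(a, b) // boom boom
}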
- -package internal - -import ( - "fmt" - "sync" - - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExportMetricsServiceResponse = sync.Pool{ - New: func() any { - return &otlpcollectormetrics.ExportMetricsServiceResponse{} - }, - } -) - -func NewOrigExportMetricsServiceResponse() *otlpcollectormetrics.ExportMetricsServiceResponse { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectormetrics.ExportMetricsServiceResponse{} - } - return protoPoolExportMetricsServiceResponse.Get().(*otlpcollectormetrics.ExportMetricsServiceResponse) -} - -func DeleteOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigExportMetricsPartialSuccess(&orig.PartialSuccess, false) - - orig.Reset() - if nullable { - protoPoolExportMetricsServiceResponse.Put(orig) - } -} - -func CopyOrigExportMetricsServiceResponse(dest, src *otlpcollectormetrics.ExportMetricsServiceResponse) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigExportMetricsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess) -} - -func GenTestOrigExportMetricsServiceResponse() *otlpcollectormetrics.ExportMetricsServiceResponse { - orig := NewOrigExportMetricsServiceResponse() - orig.PartialSuccess = *GenTestOrigExportMetricsPartialSuccess() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("partialSuccess") - MarshalJSONOrigExportMetricsPartialSuccess(&orig.PartialSuccess, dest) - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExportResponse unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "partialSuccess", "partial_success": - UnmarshalJSONOrigExportMetricsPartialSuccess(&orig.PartialSuccess, iter) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse) int { - var n int - var l int - _ = l - l = SizeProtoOrigExportMetricsPartialSuccess(&orig.PartialSuccess) - n += 1 + proto.Sov(uint64(l)) + l - return n -} - -func MarshalProtoOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigExportMetricsPartialSuccess(&orig.PartialSuccess, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - return len(buf) - pos -} - -func UnmarshalProtoOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigExportMetricsPartialSuccess(&orig.PartialSuccess, buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilespartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilespartialsuccess.go deleted file mode 100644 index f1305cc2d27..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilespartialsuccess.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExportProfilesPartialSuccess = sync.Pool{ - New: func() any { - return &otlpcollectorprofiles.ExportProfilesPartialSuccess{} - }, - } -) - -func NewOrigExportProfilesPartialSuccess() *otlpcollectorprofiles.ExportProfilesPartialSuccess { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectorprofiles.ExportProfilesPartialSuccess{} - } - return protoPoolExportProfilesPartialSuccess.Get().(*otlpcollectorprofiles.ExportProfilesPartialSuccess) -} - -func DeleteOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolExportProfilesPartialSuccess.Put(orig) - } -} - -func CopyOrigExportProfilesPartialSuccess(dest, src *otlpcollectorprofiles.ExportProfilesPartialSuccess) { - // If copying to same object, just return. - if src == dest { - return - } - dest.RejectedProfiles = src.RejectedProfiles - dest.ErrorMessage = src.ErrorMessage -} - -func GenTestOrigExportProfilesPartialSuccess() *otlpcollectorprofiles.ExportProfilesPartialSuccess { - orig := NewOrigExportProfilesPartialSuccess() - orig.RejectedProfiles = int64(13) - orig.ErrorMessage = "test_errormessage" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, dest *json.Stream) { - dest.WriteObjectStart() - if orig.RejectedProfiles != int64(0) { - dest.WriteObjectField("rejectedProfiles") - dest.WriteInt64(orig.RejectedProfiles) - } - if orig.ErrorMessage != "" { - dest.WriteObjectField("errorMessage") - dest.WriteString(orig.ErrorMessage) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExportPartialSuccess unmarshals all properties from the current struct from the source iterator. 
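All four partial-success messages share the same wire layout, which makes the magic numbers in the marshalers easy to check by hand: field 1 is a varint (tag 0x08) and field 2 is a length-delimited string (tag 0x12). The marshalers emit field 2 before field 1 only because they write back-to-front; the finished buffer still reads field 1 first. A worked byte-level example:

package main

import "fmt"

// Hand-encode an ExportProfilesPartialSuccess-shaped message to show the
// wire constants used in the generated code: 0x08 is field 1 (varint),
// 0x12 is field 2 (LEN).
func main() {
	var msg []byte
	msg = append(msg, 0x08, 13) // rejected count = 13, a single-byte varint
	s := "oops"
	msg = append(msg, 0x12, byte(len(s))) // error message, length 4
	msg = append(msg, s...)
	fmt.Printf("% x\n", msg) // 08 0d 12 04 6f 6f 70 73
}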
-func UnmarshalJSONOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "rejectedProfiles", "rejected_profiles": - orig.RejectedProfiles = iter.ReadInt64() - case "errorMessage", "error_message": - orig.ErrorMessage = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess) int { - var n int - var l int - _ = l - if orig.RejectedProfiles != 0 { - n += 1 + proto.Sov(uint64(orig.RejectedProfiles)) - } - l = len(orig.ErrorMessage) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, buf []byte) int { - pos := len(buf) - var l int - _ = l - if orig.RejectedProfiles != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedProfiles)) - pos-- - buf[pos] = 0x8 - } - l = len(orig.ErrorMessage) - if l > 0 { - pos -= l - copy(buf[pos:], orig.ErrorMessage) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.RejectedProfiles = int64(num) - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ErrorMessage = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go index 0cf9fdc9f21..8533626bebe 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go @@ -6,206 +6,23 @@ package internal -import ( - "fmt" - "sync" - - otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -type Profiles struct { - orig *otlpcollectorprofiles.ExportProfilesServiceRequest +type ProfilesWrapper struct { + orig *ExportProfilesServiceRequest state *State } -func GetOrigProfiles(ms Profiles) *otlpcollectorprofiles.ExportProfilesServiceRequest { +func GetProfilesOrig(ms ProfilesWrapper) *ExportProfilesServiceRequest { return ms.orig } -func GetProfilesState(ms Profiles) *State { +func GetProfilesState(ms 
ProfilesWrapper) *State { return ms.state } -func NewProfiles(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, state *State) Profiles { - return Profiles{orig: orig, state: state} -} - -var ( - protoPoolExportProfilesServiceRequest = sync.Pool{ - New: func() any { - return &otlpcollectorprofiles.ExportProfilesServiceRequest{} - }, - } -) - -func NewOrigExportProfilesServiceRequest() *otlpcollectorprofiles.ExportProfilesServiceRequest { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectorprofiles.ExportProfilesServiceRequest{} - } - return protoPoolExportProfilesServiceRequest.Get().(*otlpcollectorprofiles.ExportProfilesServiceRequest) +func NewProfilesWrapper(orig *ExportProfilesServiceRequest, state *State) ProfilesWrapper { + return ProfilesWrapper{orig: orig, state: state} } -func DeleteOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.ResourceProfiles { - DeleteOrigResourceProfiles(orig.ResourceProfiles[i], true) - } - DeleteOrigProfilesDictionary(&orig.Dictionary, false) - - orig.Reset() - if nullable { - protoPoolExportProfilesServiceRequest.Put(orig) - } -} - -func CopyOrigExportProfilesServiceRequest(dest, src *otlpcollectorprofiles.ExportProfilesServiceRequest) { - // If copying to same object, just return. - if src == dest { - return - } - dest.ResourceProfiles = CopyOrigResourceProfilesSlice(dest.ResourceProfiles, src.ResourceProfiles) - CopyOrigProfilesDictionary(&dest.Dictionary, &src.Dictionary) -} - -func GenTestOrigExportProfilesServiceRequest() *otlpcollectorprofiles.ExportProfilesServiceRequest { - orig := NewOrigExportProfilesServiceRequest() - orig.ResourceProfiles = GenerateOrigTestResourceProfilesSlice() - orig.Dictionary = *GenTestOrigProfilesDictionary() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.ResourceProfiles) > 0 { - dest.WriteObjectField("resourceProfiles") - dest.WriteArrayStart() - MarshalJSONOrigResourceProfiles(orig.ResourceProfiles[0], dest) - for i := 1; i < len(orig.ResourceProfiles); i++ { - dest.WriteMore() - MarshalJSONOrigResourceProfiles(orig.ResourceProfiles[i], dest) - } - dest.WriteArrayEnd() - } - dest.WriteObjectField("dictionary") - MarshalJSONOrigProfilesDictionary(&orig.Dictionary, dest) - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigProfiles unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "resourceProfiles", "resource_profiles": - for iter.ReadArray() { - orig.ResourceProfiles = append(orig.ResourceProfiles, NewOrigResourceProfiles()) - UnmarshalJSONOrigResourceProfiles(orig.ResourceProfiles[len(orig.ResourceProfiles)-1], iter) - } - - case "dictionary": - UnmarshalJSONOrigProfilesDictionary(&orig.Dictionary, iter) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest) int { - var n int - var l int - _ = l - for i := range orig.ResourceProfiles { - l = SizeProtoOrigResourceProfiles(orig.ResourceProfiles[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = SizeProtoOrigProfilesDictionary(&orig.Dictionary) - n += 1 + proto.Sov(uint64(l)) + l - return n -} - -func MarshalProtoOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- { - l = MarshalProtoOrigResourceProfiles(orig.ResourceProfiles[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - - l = MarshalProtoOrigProfilesDictionary(&orig.Dictionary, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - - return len(buf) - pos -} - -func UnmarshalProtoOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ResourceProfiles = append(orig.ResourceProfiles, NewOrigResourceProfiles()) - err = UnmarshalProtoOrigResourceProfiles(orig.ResourceProfiles[len(orig.ResourceProfiles)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigProfilesDictionary(&orig.Dictionary, buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil +func GenTestProfilesWrapper() ProfilesWrapper { + return NewProfilesWrapper(GenTestExportProfilesServiceRequest(), NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesserviceresponse.go deleted file mode 100644 index 57f666fb0a4..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesserviceresponse.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExportProfilesServiceResponse = sync.Pool{ - New: func() any { - return &otlpcollectorprofiles.ExportProfilesServiceResponse{} - }, - } -) - -func NewOrigExportProfilesServiceResponse() *otlpcollectorprofiles.ExportProfilesServiceResponse { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectorprofiles.ExportProfilesServiceResponse{} - } - return protoPoolExportProfilesServiceResponse.Get().(*otlpcollectorprofiles.ExportProfilesServiceResponse) -} - -func DeleteOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigExportProfilesPartialSuccess(&orig.PartialSuccess, false) - - orig.Reset() - if nullable { - protoPoolExportProfilesServiceResponse.Put(orig) - } -} - -func CopyOrigExportProfilesServiceResponse(dest, src *otlpcollectorprofiles.ExportProfilesServiceResponse) { - // If copying to same object, just return. 
- if src == dest { - return - } - CopyOrigExportProfilesPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess) -} - -func GenTestOrigExportProfilesServiceResponse() *otlpcollectorprofiles.ExportProfilesServiceResponse { - orig := NewOrigExportProfilesServiceResponse() - orig.PartialSuccess = *GenTestOrigExportProfilesPartialSuccess() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("partialSuccess") - MarshalJSONOrigExportProfilesPartialSuccess(&orig.PartialSuccess, dest) - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExportResponse unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "partialSuccess", "partial_success": - UnmarshalJSONOrigExportProfilesPartialSuccess(&orig.PartialSuccess, iter) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse) int { - var n int - var l int - _ = l - l = SizeProtoOrigExportProfilesPartialSuccess(&orig.PartialSuccess) - n += 1 + proto.Sov(uint64(l)) + l - return n -} - -func MarshalProtoOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigExportProfilesPartialSuccess(&orig.PartialSuccess, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - return len(buf) - pos -} - -func UnmarshalProtoOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigExportProfilesPartialSuccess(&orig.PartialSuccess, buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttracepartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttracepartialsuccess.go deleted file mode 100644 index bd9774602a4..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttracepartialsuccess.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
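The `default:` arm of every unmarshal switch calls proto.ConsumeUnknown so that fields added by newer OTLP revisions are skipped instead of failing the parse. A minimal sketch of skipping by wire type; only the varint and LEN types used by these messages are handled, and ConsumeUnknown's real behavior is assumed to be equivalent:

package main

import (
	"encoding/binary"
	"fmt"
)

// skipField advances past one field body given its wire type, in the spirit
// of the proto.ConsumeUnknown calls in the generated code.
func skipField(buf []byte, pos int, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: consume one varint value
		_, n := binary.Uvarint(buf[pos:])
		if n <= 0 {
			return 0, fmt.Errorf("bad varint")
		}
		return pos + n, nil
	case 2: // LEN: consume the length varint plus that many payload bytes
		l, n := binary.Uvarint(buf[pos:])
		if n <= 0 || pos+n+int(l) > len(buf) {
			return 0, fmt.Errorf("bad length")
		}
		return pos + n + int(l), nil
	default:
		return 0, fmt.Errorf("unsupported wire type %d", wireType)
	}
}

func main() {
	// Tag for unknown field 99, wire type 0: varint of 99<<3 = 792 is 0x98 0x06,
	// followed by the field's value 42 (0x2a).
	buf := []byte{0x98, 0x06, 0x2a}
	pos, err := skipField(buf, 2, 0) // tag already consumed at pos 2
	fmt.Println(pos, err)            // 3 <nil>: the unknown field was skipped
}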
- -package internal - -import ( - "fmt" - "sync" - - otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolExportTracePartialSuccess = sync.Pool{ - New: func() any { - return &otlpcollectortrace.ExportTracePartialSuccess{} - }, - } -) - -func NewOrigExportTracePartialSuccess() *otlpcollectortrace.ExportTracePartialSuccess { - if !UseProtoPooling.IsEnabled() { - return &otlpcollectortrace.ExportTracePartialSuccess{} - } - return protoPoolExportTracePartialSuccess.Get().(*otlpcollectortrace.ExportTracePartialSuccess) -} - -func DeleteOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolExportTracePartialSuccess.Put(orig) - } -} - -func CopyOrigExportTracePartialSuccess(dest, src *otlpcollectortrace.ExportTracePartialSuccess) { - // If copying to same object, just return. - if src == dest { - return - } - dest.RejectedSpans = src.RejectedSpans - dest.ErrorMessage = src.ErrorMessage -} - -func GenTestOrigExportTracePartialSuccess() *otlpcollectortrace.ExportTracePartialSuccess { - orig := NewOrigExportTracePartialSuccess() - orig.RejectedSpans = int64(13) - orig.ErrorMessage = "test_errormessage" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, dest *json.Stream) { - dest.WriteObjectStart() - if orig.RejectedSpans != int64(0) { - dest.WriteObjectField("rejectedSpans") - dest.WriteInt64(orig.RejectedSpans) - } - if orig.ErrorMessage != "" { - dest.WriteObjectField("errorMessage") - dest.WriteString(orig.ErrorMessage) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigExportPartialSuccess unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "rejectedSpans", "rejected_spans": - orig.RejectedSpans = iter.ReadInt64() - case "errorMessage", "error_message": - orig.ErrorMessage = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess) int { - var n int - var l int - _ = l - if orig.RejectedSpans != 0 { - n += 1 + proto.Sov(uint64(orig.RejectedSpans)) - } - l = len(orig.ErrorMessage) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, buf []byte) int { - pos := len(buf) - var l int - _ = l - if orig.RejectedSpans != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedSpans)) - pos-- - buf[pos] = 0x8 - } - l = len(orig.ErrorMessage) - if l > 0 { - pos -= l - copy(buf[pos:], orig.ErrorMessage) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.RejectedSpans = int64(num) - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ErrorMessage = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go index 4ce9efc1e3d..75d82979d88 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go @@ -6,174 +6,23 @@ package internal -import ( - "fmt" - "sync" - - otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -type Traces struct { - orig *otlpcollectortrace.ExportTraceServiceRequest +type TracesWrapper struct { + orig *ExportTraceServiceRequest state *State } -func GetOrigTraces(ms Traces) *otlpcollectortrace.ExportTraceServiceRequest { +func GetTracesOrig(ms TracesWrapper) *ExportTraceServiceRequest { return ms.orig } -func GetTracesState(ms Traces) *State { +func GetTracesState(ms TracesWrapper) *State { return ms.state } -func NewTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *State) 
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go
index 4ce9efc1e3d..75d82979d88 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go
@@ -6,174 +6,23 @@
 package internal
 
-import (
-	"fmt"
-	"sync"
-
-	otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
-	"go.opentelemetry.io/collector/pdata/internal/json"
-	"go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type Traces struct {
-	orig *otlpcollectortrace.ExportTraceServiceRequest
+type TracesWrapper struct {
+	orig  *ExportTraceServiceRequest
 	state *State
 }
 
-func GetOrigTraces(ms Traces) *otlpcollectortrace.ExportTraceServiceRequest {
+func GetTracesOrig(ms TracesWrapper) *ExportTraceServiceRequest {
 	return ms.orig
 }
 
-func GetTracesState(ms Traces) *State {
+func GetTracesState(ms TracesWrapper) *State {
 	return ms.state
 }
 
-func NewTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *State) Traces {
-	return Traces{orig: orig, state: state}
+func NewTracesWrapper(orig *ExportTraceServiceRequest, state *State) TracesWrapper {
+	return TracesWrapper{orig: orig, state: state}
 }
 
-var (
-	protoPoolExportTraceServiceRequest = sync.Pool{
-		New: func() any {
-			return &otlpcollectortrace.ExportTraceServiceRequest{}
-		},
-	}
-)
-
-func NewOrigExportTraceServiceRequest() *otlpcollectortrace.ExportTraceServiceRequest {
-	if !UseProtoPooling.IsEnabled() {
-		return &otlpcollectortrace.ExportTraceServiceRequest{}
-	}
-	return protoPoolExportTraceServiceRequest.Get().(*otlpcollectortrace.ExportTraceServiceRequest)
-}
-
-func DeleteOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, nullable bool) {
-	if orig == nil {
-		return
-	}
-
-	if !UseProtoPooling.IsEnabled() {
-		orig.Reset()
-		return
-	}
-
-	for i := range orig.ResourceSpans {
-		DeleteOrigResourceSpans(orig.ResourceSpans[i], true)
-	}
-
-	orig.Reset()
-	if nullable {
-		protoPoolExportTraceServiceRequest.Put(orig)
-	}
-}
-
-func CopyOrigExportTraceServiceRequest(dest, src *otlpcollectortrace.ExportTraceServiceRequest) {
-	// If copying to same object, just return.
-	if src == dest {
-		return
-	}
-	dest.ResourceSpans = CopyOrigResourceSpansSlice(dest.ResourceSpans, src.ResourceSpans)
-}
-
-func GenTestOrigExportTraceServiceRequest() *otlpcollectortrace.ExportTraceServiceRequest {
-	orig := NewOrigExportTraceServiceRequest()
-	orig.ResourceSpans = GenerateOrigTestResourceSpansSlice()
-	return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, dest *json.Stream) {
-	dest.WriteObjectStart()
-	if len(orig.ResourceSpans) > 0 {
-		dest.WriteObjectField("resourceSpans")
-		dest.WriteArrayStart()
-		MarshalJSONOrigResourceSpans(orig.ResourceSpans[0], dest)
-		for i := 1; i < len(orig.ResourceSpans); i++ {
-			dest.WriteMore()
-			MarshalJSONOrigResourceSpans(orig.ResourceSpans[i], dest)
-		}
-		dest.WriteArrayEnd()
-	}
-	dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigTraces unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, iter *json.Iterator) {
-	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
-		switch f {
-		case "resourceSpans", "resource_spans":
-			for iter.ReadArray() {
-				orig.ResourceSpans = append(orig.ResourceSpans, NewOrigResourceSpans())
-				UnmarshalJSONOrigResourceSpans(orig.ResourceSpans[len(orig.ResourceSpans)-1], iter)
-			}
-
-		default:
-			iter.Skip()
-		}
-	}
-}
-
-func SizeProtoOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest) int {
-	var n int
-	var l int
-	_ = l
-	for i := range orig.ResourceSpans {
-		l = SizeProtoOrigResourceSpans(orig.ResourceSpans[i])
-		n += 1 + proto.Sov(uint64(l)) + l
-	}
-	return n
-}
-
-func MarshalProtoOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, buf []byte) int {
-	pos := len(buf)
-	var l int
-	_ = l
-	for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
-		l = MarshalProtoOrigResourceSpans(orig.ResourceSpans[i], buf[:pos])
-		pos -= l
-		pos = proto.EncodeVarint(buf, pos, uint64(l))
-		pos--
-		buf[pos] = 0xa
-	}
-	return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, buf []byte) error {
-	var err error
-	var fieldNum int32
-	var wireType proto.WireType
-
-	l := len(buf)
-	pos := 0
-	for pos < l {
-		// If in a group parsing, move to the next tag.
-		fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
-		if err != nil {
-			return err
-		}
-		switch fieldNum {
-
-		case 1:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-			orig.ResourceSpans = append(orig.ResourceSpans, NewOrigResourceSpans())
-			err = UnmarshalProtoOrigResourceSpans(orig.ResourceSpans[len(orig.ResourceSpans)-1], buf[startPos:pos])
-			if err != nil {
-				return err
-			}
-		default:
-			pos, err = proto.ConsumeUnknown(buf, pos, wireType)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
+func GenTestTracesWrapper() TracesWrapper {
+	return NewTracesWrapper(GenTestExportTraceServiceRequest(), NewState())
 }
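The surviving (modified) wrapper files all undergo one mechanical rename, visible above: the internal view type gains a Wrapper suffix (Traces becomes TracesWrapper), the accessor naming flips (GetOrigTraces becomes GetTracesOrig), and orig now refers to a type local to the internal package instead of the protogen import. A sketch of the resulting shape; every name here is a hypothetical stand-in except the orig/state pairing itself:

package main

import "fmt"

// State stands in for pdata's shared mutability-tracking state.
type State struct{ readOnly bool }

// Payload stands in for a generated request struct.
type Payload struct{ items []string }

// PayloadWrapper pairs the underlying struct with shared state,
// mirroring the TracesWrapper pattern in the hunk above.
type PayloadWrapper struct {
	orig  *Payload
	state *State
}

func NewPayloadWrapper(orig *Payload, state *State) PayloadWrapper {
	return PayloadWrapper{orig: orig, state: state}
}

func GetPayloadOrig(w PayloadWrapper) *Payload { return w.orig }
func GetPayloadState(w PayloadWrapper) *State  { return w.state }

func main() {
	w := NewPayloadWrapper(&Payload{items: []string{"a"}}, &State{})
	fmt.Println(len(GetPayloadOrig(w).items), GetPayloadState(w).readOnly)
}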
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceserviceresponse.go
deleted file mode 100644
index 293386b3090..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceserviceresponse.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	"fmt"
-	"sync"
-
-	otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
-	"go.opentelemetry.io/collector/pdata/internal/json"
-	"go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
-	protoPoolExportTraceServiceResponse = sync.Pool{
-		New: func() any {
-			return &otlpcollectortrace.ExportTraceServiceResponse{}
-		},
-	}
-)
-
-func NewOrigExportTraceServiceResponse() *otlpcollectortrace.ExportTraceServiceResponse {
-	if !UseProtoPooling.IsEnabled() {
-		return &otlpcollectortrace.ExportTraceServiceResponse{}
-	}
-	return protoPoolExportTraceServiceResponse.Get().(*otlpcollectortrace.ExportTraceServiceResponse)
-}
-
-func DeleteOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, nullable bool) {
-	if orig == nil {
-		return
-	}
-
-	if !UseProtoPooling.IsEnabled() {
-		orig.Reset()
-		return
-	}
-
-	DeleteOrigExportTracePartialSuccess(&orig.PartialSuccess, false)
-
-	orig.Reset()
-	if nullable {
-		protoPoolExportTraceServiceResponse.Put(orig)
-	}
-}
-
-func CopyOrigExportTraceServiceResponse(dest, src *otlpcollectortrace.ExportTraceServiceResponse) {
-	// If copying to same object, just return.
-	if src == dest {
-		return
-	}
-	CopyOrigExportTracePartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
-}
-
-func GenTestOrigExportTraceServiceResponse() *otlpcollectortrace.ExportTraceServiceResponse {
-	orig := NewOrigExportTraceServiceResponse()
-	orig.PartialSuccess = *GenTestOrigExportTracePartialSuccess()
-	return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, dest *json.Stream) {
-	dest.WriteObjectStart()
-	dest.WriteObjectField("partialSuccess")
-	MarshalJSONOrigExportTracePartialSuccess(&orig.PartialSuccess, dest)
-	dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportResponse unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, iter *json.Iterator) {
-	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
-		switch f {
-		case "partialSuccess", "partial_success":
-			UnmarshalJSONOrigExportTracePartialSuccess(&orig.PartialSuccess, iter)
-		default:
-			iter.Skip()
-		}
-	}
-}
-
-func SizeProtoOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse) int {
-	var n int
-	var l int
-	_ = l
-	l = SizeProtoOrigExportTracePartialSuccess(&orig.PartialSuccess)
-	n += 1 + proto.Sov(uint64(l)) + l
-	return n
-}
-
-func MarshalProtoOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, buf []byte) int {
-	pos := len(buf)
-	var l int
-	_ = l
-
-	l = MarshalProtoOrigExportTracePartialSuccess(&orig.PartialSuccess, buf[:pos])
-	pos -= l
-	pos = proto.EncodeVarint(buf, pos, uint64(l))
-	pos--
-	buf[pos] = 0xa
-
-	return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, buf []byte) error {
-	var err error
-	var fieldNum int32
-	var wireType proto.WireType
-
-	l := len(buf)
-	pos := 0
-	for pos < l {
-		// If in a group parsing, move to the next tag.
-		fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
-		if err != nil {
-			return err
-		}
-		switch fieldNum {
-
-		case 1:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-
-			err = UnmarshalProtoOrigExportTracePartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
-			if err != nil {
-				return err
-			}
-		default:
-			pos, err = proto.ConsumeUnknown(buf, pos, wireType)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
index 9f21f74ffa9..8d0d6ea6cf8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
@@ -6,32 +6,28 @@
 package internal
 
-type Float64Slice struct {
+type Float64SliceWrapper struct {
 	orig  *[]float64
 	state *State
 }
 
-func GetOrigFloat64Slice(ms Float64Slice) *[]float64 {
+func GetFloat64SliceOrig(ms Float64SliceWrapper) *[]float64 {
 	return ms.orig
 }
 
-func GetFloat64SliceState(ms Float64Slice) *State {
+func GetFloat64SliceState(ms Float64SliceWrapper) *State {
 	return ms.state
 }
 
-func NewFloat64Slice(orig *[]float64, state *State) Float64Slice {
-	return Float64Slice{orig: orig, state: state}
+func NewFloat64SliceWrapper(orig *[]float64, state *State) Float64SliceWrapper {
+	return Float64SliceWrapper{orig: orig, state: state}
 }
 
-func GenerateTestFloat64Slice() Float64Slice {
-	orig := GenerateOrigTestFloat64Slice()
-	return NewFloat64Slice(&orig, NewState())
+func GenTestFloat64SliceWrapper() Float64SliceWrapper {
+	orig := []float64{1.1, 2.2, 3.3}
+	return NewFloat64SliceWrapper(&orig, NewState())
 }
 
-func CopyOrigFloat64Slice(dst, src []float64) []float64 {
-	return append(dst[:0], src...)
-}
-
-func GenerateOrigTestFloat64Slice() []float64 {
+func GenTestFloat64Slice() []float64 {
 	return []float64{1.1, 2.2, 3.3}
 }
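The primitive-slice CopyOrig helpers dropped above were one-liners built on the reslice-and-append idiom, which reuses the destination's backing array whenever capacity allows. For reference, the same idiom outside any collector API:

package main

import "fmt"

// copyFloat64s mirrors the deleted CopyOrigFloat64Slice: truncate dst to
// length zero (keeping its capacity), then append all of src.
func copyFloat64s(dst, src []float64) []float64 {
	return append(dst[:0], src...)
}

func main() {
	dst := make([]float64, 0, 8)
	dst = copyFloat64s(dst, []float64{1.1, 2.2, 3.3})
	fmt.Println(dst, cap(dst)) // [1.1 2.2 3.3] 8
}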
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_functionslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_functionslice.go
deleted file mode 100644
index 4295cf2167d..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_functionslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigFunctionSlice(dest, src []*otlpprofiles.Function) []*otlpprofiles.Function {
-	var newDest []*otlpprofiles.Function
-	if cap(dest) < len(src) {
-		newDest = make([]*otlpprofiles.Function, len(src))
-		// Copy old pointers to re-use.
-		copy(newDest, dest)
-		// Add new pointers for missing elements from len(dest) to len(srt).
-		for i := len(dest); i < len(src); i++ {
-			newDest[i] = NewOrigFunction()
-		}
-	} else {
-		newDest = dest[:len(src)]
-		// Cleanup the rest of the elements so GC can free the memory.
-		// This can happen when len(src) < len(dest) < cap(dest).
-		for i := len(src); i < len(dest); i++ {
-			DeleteOrigFunction(dest[i], true)
-			dest[i] = nil
-		}
-		// Add new pointers for missing elements.
-		// This can happen when len(dest) < len(src) < cap(dest).
-		for i := len(dest); i < len(src); i++ {
-			newDest[i] = NewOrigFunction()
-		}
-	}
-	for i := range src {
-		CopyOrigFunction(newDest[i], src[i])
-	}
-	return newDest
-}
-
-func GenerateOrigTestFunctionSlice() []*otlpprofiles.Function {
-	orig := make([]*otlpprofiles.Function, 5)
-	orig[0] = NewOrigFunction()
-	orig[1] = GenTestOrigFunction()
-	orig[2] = NewOrigFunction()
-	orig[3] = GenTestOrigFunction()
-	orig[4] = NewOrigFunction()
-	return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_gauge.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_gauge.go
deleted file mode 100644
index 7fd96f46480..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_gauge.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	"fmt"
-	"sync"
-
-	otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-	"go.opentelemetry.io/collector/pdata/internal/json"
-	"go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
-	protoPoolGauge = sync.Pool{
-		New: func() any {
-			return &otlpmetrics.Gauge{}
-		},
-	}
-)
-
-func NewOrigGauge() *otlpmetrics.Gauge {
-	if !UseProtoPooling.IsEnabled() {
-		return &otlpmetrics.Gauge{}
-	}
-	return protoPoolGauge.Get().(*otlpmetrics.Gauge)
-}
-
-func DeleteOrigGauge(orig *otlpmetrics.Gauge, nullable bool) {
-	if orig == nil {
-		return
-	}
-
-	if !UseProtoPooling.IsEnabled() {
-		orig.Reset()
-		return
-	}
-
-	for i := range orig.DataPoints {
-		DeleteOrigNumberDataPoint(orig.DataPoints[i], true)
-	}
-
-	orig.Reset()
-	if nullable {
-		protoPoolGauge.Put(orig)
-	}
-}
-
-func CopyOrigGauge(dest, src *otlpmetrics.Gauge) {
-	// If copying to same object, just return.
-	if src == dest {
-		return
-	}
-	dest.DataPoints = CopyOrigNumberDataPointSlice(dest.DataPoints, src.DataPoints)
-}
-
-func GenTestOrigGauge() *otlpmetrics.Gauge {
-	orig := NewOrigGauge()
-	orig.DataPoints = GenerateOrigTestNumberDataPointSlice()
-	return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigGauge(orig *otlpmetrics.Gauge, dest *json.Stream) {
-	dest.WriteObjectStart()
-	if len(orig.DataPoints) > 0 {
-		dest.WriteObjectField("dataPoints")
-		dest.WriteArrayStart()
-		MarshalJSONOrigNumberDataPoint(orig.DataPoints[0], dest)
-		for i := 1; i < len(orig.DataPoints); i++ {
-			dest.WriteMore()
-			MarshalJSONOrigNumberDataPoint(orig.DataPoints[i], dest)
-		}
-		dest.WriteArrayEnd()
-	}
-	dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigGauge unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigGauge(orig *otlpmetrics.Gauge, iter *json.Iterator) {
-	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
-		switch f {
-		case "dataPoints", "data_points":
-			for iter.ReadArray() {
-				orig.DataPoints = append(orig.DataPoints, NewOrigNumberDataPoint())
-				UnmarshalJSONOrigNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
-			}
-
-		default:
-			iter.Skip()
-		}
-	}
-}
-
-func SizeProtoOrigGauge(orig *otlpmetrics.Gauge) int {
-	var n int
-	var l int
-	_ = l
-	for i := range orig.DataPoints {
-		l = SizeProtoOrigNumberDataPoint(orig.DataPoints[i])
-		n += 1 + proto.Sov(uint64(l)) + l
-	}
-	return n
-}
-
-func MarshalProtoOrigGauge(orig *otlpmetrics.Gauge, buf []byte) int {
-	pos := len(buf)
-	var l int
-	_ = l
-	for i := len(orig.DataPoints) - 1; i >= 0; i-- {
-		l = MarshalProtoOrigNumberDataPoint(orig.DataPoints[i], buf[:pos])
-		pos -= l
-		pos = proto.EncodeVarint(buf, pos, uint64(l))
-		pos--
-		buf[pos] = 0xa
-	}
-	return len(buf) - pos
-}
-
-func UnmarshalProtoOrigGauge(orig *otlpmetrics.Gauge, buf []byte) error {
-	var err error
-	var fieldNum int32
-	var wireType proto.WireType
-
-	l := len(buf)
-	pos := 0
-	for pos < l {
-		// If in a group parsing, move to the next tag.
-		fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
-		if err != nil {
-			return err
-		}
-		switch fieldNum {
-
-		case 1:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-			orig.DataPoints = append(orig.DataPoints, NewOrigNumberDataPoint())
-			err = UnmarshalProtoOrigNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
-			if err != nil {
-				return err
-			}
-		default:
-			pos, err = proto.ConsumeUnknown(buf, pos, wireType)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
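The hand-rolled marshalers being deleted write fields back-to-front and finish each field with a hard-coded tag byte (0xa, 0x12, 0x10, and so on). Those constants follow the standard protobuf key formula, key = field_number<<3 | wire_type, with wire type 2 for length-delimited fields and 0 for varints. A quick check of the constants seen throughout this diff:

package main

import "fmt"

const (
	wireVarint = 0 // varint-encoded scalar
	wireLen    = 2 // length-delimited (strings, bytes, messages)
)

func tag(field, wire int) int { return field<<3 | wire }

func main() {
	fmt.Printf("%#x\n", tag(1, wireLen))    // 0xa  — e.g. DataPoints, ResourceSpans
	fmt.Printf("%#x\n", tag(2, wireLen))    // 0x12 — e.g. ErrorMessage, Value
	fmt.Printf("%#x\n", tag(1, wireVarint)) // 0x8  — e.g. RejectedSpans
	fmt.Printf("%#x\n", tag(2, wireVarint)) // 0x10 — e.g. AggregationTemporality
	fmt.Printf("%#x\n", tag(4, wireVarint)) // 0x20 — e.g. DroppedAttributesCount
}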
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogram.go
deleted file mode 100644
index 006c222bfea..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogram.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	"fmt"
-	"sync"
-
-	otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-	"go.opentelemetry.io/collector/pdata/internal/json"
-	"go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
-	protoPoolHistogram = sync.Pool{
-		New: func() any {
-			return &otlpmetrics.Histogram{}
-		},
-	}
-)
-
-func NewOrigHistogram() *otlpmetrics.Histogram {
-	if !UseProtoPooling.IsEnabled() {
-		return &otlpmetrics.Histogram{}
-	}
-	return protoPoolHistogram.Get().(*otlpmetrics.Histogram)
-}
-
-func DeleteOrigHistogram(orig *otlpmetrics.Histogram, nullable bool) {
-	if orig == nil {
-		return
-	}
-
-	if !UseProtoPooling.IsEnabled() {
-		orig.Reset()
-		return
-	}
-
-	for i := range orig.DataPoints {
-		DeleteOrigHistogramDataPoint(orig.DataPoints[i], true)
-	}
-
-	orig.Reset()
-	if nullable {
-		protoPoolHistogram.Put(orig)
-	}
-}
-
-func CopyOrigHistogram(dest, src *otlpmetrics.Histogram) {
-	// If copying to same object, just return.
-	if src == dest {
-		return
-	}
-	dest.DataPoints = CopyOrigHistogramDataPointSlice(dest.DataPoints, src.DataPoints)
-	dest.AggregationTemporality = src.AggregationTemporality
-}
-
-func GenTestOrigHistogram() *otlpmetrics.Histogram {
-	orig := NewOrigHistogram()
-	orig.DataPoints = GenerateOrigTestHistogramDataPointSlice()
-	orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1)
-	return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigHistogram(orig *otlpmetrics.Histogram, dest *json.Stream) {
-	dest.WriteObjectStart()
-	if len(orig.DataPoints) > 0 {
-		dest.WriteObjectField("dataPoints")
-		dest.WriteArrayStart()
-		MarshalJSONOrigHistogramDataPoint(orig.DataPoints[0], dest)
-		for i := 1; i < len(orig.DataPoints); i++ {
-			dest.WriteMore()
-			MarshalJSONOrigHistogramDataPoint(orig.DataPoints[i], dest)
-		}
-		dest.WriteArrayEnd()
-	}
-
-	if int32(orig.AggregationTemporality) != 0 {
-		dest.WriteObjectField("aggregationTemporality")
-		dest.WriteInt32(int32(orig.AggregationTemporality))
-	}
-	dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigHistogram unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigHistogram(orig *otlpmetrics.Histogram, iter *json.Iterator) {
-	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
-		switch f {
-		case "dataPoints", "data_points":
-			for iter.ReadArray() {
-				orig.DataPoints = append(orig.DataPoints, NewOrigHistogramDataPoint())
-				UnmarshalJSONOrigHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
-			}
-
-		case "aggregationTemporality", "aggregation_temporality":
-			orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value))
-		default:
-			iter.Skip()
-		}
-	}
-}
-
-func SizeProtoOrigHistogram(orig *otlpmetrics.Histogram) int {
-	var n int
-	var l int
-	_ = l
-	for i := range orig.DataPoints {
-		l = SizeProtoOrigHistogramDataPoint(orig.DataPoints[i])
-		n += 1 + proto.Sov(uint64(l)) + l
-	}
-	if orig.AggregationTemporality != 0 {
-		n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
-	}
-	return n
-}
-
-func MarshalProtoOrigHistogram(orig *otlpmetrics.Histogram, buf []byte) int {
-	pos := len(buf)
-	var l int
-	_ = l
-	for i := len(orig.DataPoints) - 1; i >= 0; i-- {
-		l = MarshalProtoOrigHistogramDataPoint(orig.DataPoints[i], buf[:pos])
-		pos -= l
-		pos = proto.EncodeVarint(buf, pos, uint64(l))
-		pos--
-		buf[pos] = 0xa
-	}
-	if orig.AggregationTemporality != 0 {
-		pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
-		pos--
-		buf[pos] = 0x10
-	}
-	return len(buf) - pos
-}
-
-func UnmarshalProtoOrigHistogram(orig *otlpmetrics.Histogram, buf []byte) error {
-	var err error
-	var fieldNum int32
-	var wireType proto.WireType
-
-	l := len(buf)
-	pos := 0
-	for pos < l {
-		// If in a group parsing, move to the next tag.
-		fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
-		if err != nil {
-			return err
-		}
-		switch fieldNum {
-
-		case 1:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-			orig.DataPoints = append(orig.DataPoints, NewOrigHistogramDataPoint())
-			err = UnmarshalProtoOrigHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
-			if err != nil {
-				return err
-			}
-
-		case 2:
-			if wireType != proto.WireTypeVarint {
-				return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
-			}
-			var num uint64
-			num, pos, err = proto.ConsumeVarint(buf, pos)
-			if err != nil {
-				return err
-			}
-
-			orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num)
-		default:
-			pos, err = proto.ConsumeUnknown(buf, pos, wireType)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
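The Size* functions above cost each field as `1 + proto.Sov(uint64(l)) + l`: one tag byte, a varint-encoded length, then the payload. proto.Sov is the usual size-of-varint helper, ceil(bits/7). A standalone equivalent to make the arithmetic concrete (this sov is my own sketch of the standard formula, not the collector's internal package):

package main

import (
	"fmt"
	"math/bits"
)

// sov returns the encoded size of x as a protobuf varint.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	msg := "partial success" // 15 bytes
	l := uint64(len(msg))
	// tag byte + length varint + payload bytes
	fmt.Println(1 + sov(l) + len(msg)) // 17
}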
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapointslice.go
deleted file mode 100644
index 210f02eab75..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapointslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigHistogramDataPointSlice(dest, src []*otlpmetrics.HistogramDataPoint) []*otlpmetrics.HistogramDataPoint {
-	var newDest []*otlpmetrics.HistogramDataPoint
-	if cap(dest) < len(src) {
-		newDest = make([]*otlpmetrics.HistogramDataPoint, len(src))
-		// Copy old pointers to re-use.
-		copy(newDest, dest)
-		// Add new pointers for missing elements from len(dest) to len(srt).
-		for i := len(dest); i < len(src); i++ {
-			newDest[i] = NewOrigHistogramDataPoint()
-		}
-	} else {
-		newDest = dest[:len(src)]
-		// Cleanup the rest of the elements so GC can free the memory.
-		// This can happen when len(src) < len(dest) < cap(dest).
-		for i := len(src); i < len(dest); i++ {
-			DeleteOrigHistogramDataPoint(dest[i], true)
-			dest[i] = nil
-		}
-		// Add new pointers for missing elements.
-		// This can happen when len(dest) < len(src) < cap(dest).
-		for i := len(dest); i < len(src); i++ {
-			newDest[i] = NewOrigHistogramDataPoint()
-		}
-	}
-	for i := range src {
-		CopyOrigHistogramDataPoint(newDest[i], src[i])
-	}
-	return newDest
-}
-
-func GenerateOrigTestHistogramDataPointSlice() []*otlpmetrics.HistogramDataPoint {
-	orig := make([]*otlpmetrics.HistogramDataPoint, 5)
-	orig[0] = NewOrigHistogramDataPoint()
-	orig[1] = GenTestOrigHistogramDataPoint()
-	orig[2] = NewOrigHistogramDataPoint()
-	orig[3] = GenTestOrigHistogramDataPoint()
-	orig[4] = NewOrigHistogramDataPoint()
-	return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
index f9fea6cb854..5fbe72e12ec 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
@@ -6,266 +6,23 @@
 package internal
 
-import (
-	"fmt"
-	"sync"
-
-	otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-	"go.opentelemetry.io/collector/pdata/internal/json"
-	"go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type InstrumentationScope struct {
-	orig *otlpcommon.InstrumentationScope
+type InstrumentationScopeWrapper struct {
+	orig  *InstrumentationScope
 	state *State
 }
 
-func GetOrigInstrumentationScope(ms InstrumentationScope) *otlpcommon.InstrumentationScope {
+func GetInstrumentationScopeOrig(ms InstrumentationScopeWrapper) *InstrumentationScope {
 	return ms.orig
 }
 
-func GetInstrumentationScopeState(ms InstrumentationScope) *State {
+func GetInstrumentationScopeState(ms InstrumentationScopeWrapper) *State {
 	return ms.state
 }
 
-func NewInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *State) InstrumentationScope {
-	return InstrumentationScope{orig: orig, state: state}
-}
-
-var (
-	protoPoolInstrumentationScope = sync.Pool{
-		New: func() any {
-			return &otlpcommon.InstrumentationScope{}
-		},
-	}
-)
-
-func NewOrigInstrumentationScope() *otlpcommon.InstrumentationScope {
-	if !UseProtoPooling.IsEnabled() {
-		return &otlpcommon.InstrumentationScope{}
-	}
-	return protoPoolInstrumentationScope.Get().(*otlpcommon.InstrumentationScope)
+func NewInstrumentationScopeWrapper(orig *InstrumentationScope, state *State) InstrumentationScopeWrapper {
+	return InstrumentationScopeWrapper{orig: orig, state: state}
 }
 
-func DeleteOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, nullable bool) {
-	if orig == nil {
-		return
-	}
-
-	if !UseProtoPooling.IsEnabled() {
-		orig.Reset()
-		return
-	}
-
-	for i := range orig.Attributes {
-		DeleteOrigKeyValue(&orig.Attributes[i], false)
-	}
-
-	orig.Reset()
-	if nullable {
-		protoPoolInstrumentationScope.Put(orig)
-	}
-}
-
-func CopyOrigInstrumentationScope(dest, src *otlpcommon.InstrumentationScope) {
-	// If copying to same object, just return.
-	if src == dest {
-		return
-	}
-	dest.Name = src.Name
-	dest.Version = src.Version
-	dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
-	dest.DroppedAttributesCount = src.DroppedAttributesCount
-}
-
-func GenTestOrigInstrumentationScope() *otlpcommon.InstrumentationScope {
-	orig := NewOrigInstrumentationScope()
-	orig.Name = "test_name"
-	orig.Version = "test_version"
-	orig.Attributes = GenerateOrigTestKeyValueSlice()
-	orig.DroppedAttributesCount = uint32(13)
-	return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, dest *json.Stream) {
-	dest.WriteObjectStart()
-	if orig.Name != "" {
-		dest.WriteObjectField("name")
-		dest.WriteString(orig.Name)
-	}
-	if orig.Version != "" {
-		dest.WriteObjectField("version")
-		dest.WriteString(orig.Version)
-	}
-	if len(orig.Attributes) > 0 {
-		dest.WriteObjectField("attributes")
-		dest.WriteArrayStart()
-		MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
-		for i := 1; i < len(orig.Attributes); i++ {
-			dest.WriteMore()
-			MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
-		}
-		dest.WriteArrayEnd()
-	}
-	if orig.DroppedAttributesCount != uint32(0) {
-		dest.WriteObjectField("droppedAttributesCount")
-		dest.WriteUint32(orig.DroppedAttributesCount)
-	}
-	dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigInstrumentationScope unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, iter *json.Iterator) {
-	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
-		switch f {
-		case "name":
-			orig.Name = iter.ReadString()
-		case "version":
-			orig.Version = iter.ReadString()
-		case "attributes":
-			for iter.ReadArray() {
-				orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
-				UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
-			}
-
-		case "droppedAttributesCount", "dropped_attributes_count":
-			orig.DroppedAttributesCount = iter.ReadUint32()
-		default:
-			iter.Skip()
-		}
-	}
-}
-
-func SizeProtoOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope) int {
-	var n int
-	var l int
-	_ = l
-	l = len(orig.Name)
-	if l > 0 {
-		n += 1 + proto.Sov(uint64(l)) + l
-	}
-	l = len(orig.Version)
-	if l > 0 {
-		n += 1 + proto.Sov(uint64(l)) + l
-	}
-	for i := range orig.Attributes {
-		l = SizeProtoOrigKeyValue(&orig.Attributes[i])
-		n += 1 + proto.Sov(uint64(l)) + l
-	}
-	if orig.DroppedAttributesCount != 0 {
-		n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
-	}
-	return n
-}
-
-func MarshalProtoOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, buf []byte) int {
-	pos := len(buf)
-	var l int
-	_ = l
-	l = len(orig.Name)
-	if l > 0 {
-		pos -= l
-		copy(buf[pos:], orig.Name)
-		pos = proto.EncodeVarint(buf, pos, uint64(l))
-		pos--
-		buf[pos] = 0xa
-	}
-	l = len(orig.Version)
-	if l > 0 {
-		pos -= l
-		copy(buf[pos:], orig.Version)
-		pos = proto.EncodeVarint(buf, pos, uint64(l))
-		pos--
-		buf[pos] = 0x12
-	}
-	for i := len(orig.Attributes) - 1; i >= 0; i-- {
-		l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
-		pos -= l
-		pos = proto.EncodeVarint(buf, pos, uint64(l))
-		pos--
-		buf[pos] = 0x1a
-	}
-	if orig.DroppedAttributesCount != 0 {
-		pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
-		pos--
-		buf[pos] = 0x20
-	}
-	return len(buf) - pos
-}
-
-func UnmarshalProtoOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, buf []byte) error {
-	var err error
-	var fieldNum int32
-	var wireType proto.WireType
-
-	l := len(buf)
-	pos := 0
-	for pos < l {
-		// If in a group parsing, move to the next tag.
-		fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
-		if err != nil {
-			return err
-		}
-		switch fieldNum {
-
-		case 1:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-			orig.Name = string(buf[startPos:pos])
-
-		case 2:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-			orig.Version = string(buf[startPos:pos])
-
-		case 3:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-			orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
-			err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
-			if err != nil {
-				return err
-			}
-
-		case 4:
-			if wireType != proto.WireTypeVarint {
-				return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
-			}
-			var num uint64
-			num, pos, err = proto.ConsumeVarint(buf, pos)
-			if err != nil {
-				return err
-			}
-
-			orig.DroppedAttributesCount = uint32(num)
-		default:
-			pos, err = proto.ConsumeUnknown(buf, pos, wireType)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
+func GenTestInstrumentationScopeWrapper() InstrumentationScopeWrapper {
+	return NewInstrumentationScopeWrapper(GenTestInstrumentationScope(), NewState())
 }
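Worth noting in the deleted InstrumentationScope code: Attributes is a slice of KeyValue values, so DeleteOrig iterates with &orig.Attributes[i] and passes nullable=false, whereas message slices such as ResourceSpans hold pointers and pass nullable=true. Only the independently allocated pointer elements can be recycled; value elements live inside the slice's backing array. A small illustration with hypothetical types:

package main

import "fmt"

type KV struct{ Key string }

// reset stands in for a DeleteOrig* helper; nullable marks instances
// that could, in principle, be returned to an allocation pool.
func reset(kv *KV, nullable bool) {
	*kv = KV{}
	_ = nullable
}

func main() {
	byValue := []KV{{"a"}, {"b"}} // like InstrumentationScope.Attributes
	byPtr := []*KV{{Key: "c"}}    // like ExportTraceServiceRequest.ResourceSpans
	for i := range byValue {
		reset(&byValue[i], false) // address into the backing array
	}
	for _, p := range byPtr {
		reset(p, true) // independently allocated, poolable
	}
	fmt.Println(byValue, *byPtr[0])
}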
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
index 86ceaa8c50a..ed66b200738 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
@@ -6,32 +6,28 @@
 package internal
 
-type Int32Slice struct {
+type Int32SliceWrapper struct {
 	orig  *[]int32
 	state *State
 }
 
-func GetOrigInt32Slice(ms Int32Slice) *[]int32 {
+func GetInt32SliceOrig(ms Int32SliceWrapper) *[]int32 {
 	return ms.orig
 }
 
-func GetInt32SliceState(ms Int32Slice) *State {
+func GetInt32SliceState(ms Int32SliceWrapper) *State {
 	return ms.state
 }
 
-func NewInt32Slice(orig *[]int32, state *State) Int32Slice {
-	return Int32Slice{orig: orig, state: state}
+func NewInt32SliceWrapper(orig *[]int32, state *State) Int32SliceWrapper {
+	return Int32SliceWrapper{orig: orig, state: state}
}
 
-func GenerateTestInt32Slice() Int32Slice {
-	orig := GenerateOrigTestInt32Slice()
-	return NewInt32Slice(&orig, NewState())
+func GenTestInt32SliceWrapper() Int32SliceWrapper {
+	orig := []int32{1, 2, 3}
+	return NewInt32SliceWrapper(&orig, NewState())
 }
 
-func CopyOrigInt32Slice(dst, src []int32) []int32 {
-	return append(dst[:0], src...)
-}
-
-func GenerateOrigTestInt32Slice() []int32 {
+func GenTestInt32Slice() []int32 {
 	return []int32{1, 2, 3}
 }
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
index ec4df4f49ee..9174f8632a7 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
@@ -6,32 +6,28 @@
 package internal
 
-type Int64Slice struct {
+type Int64SliceWrapper struct {
 	orig  *[]int64
 	state *State
 }
 
-func GetOrigInt64Slice(ms Int64Slice) *[]int64 {
+func GetInt64SliceOrig(ms Int64SliceWrapper) *[]int64 {
 	return ms.orig
 }
 
-func GetInt64SliceState(ms Int64Slice) *State {
+func GetInt64SliceState(ms Int64SliceWrapper) *State {
 	return ms.state
 }
 
-func NewInt64Slice(orig *[]int64, state *State) Int64Slice {
-	return Int64Slice{orig: orig, state: state}
+func NewInt64SliceWrapper(orig *[]int64, state *State) Int64SliceWrapper {
+	return Int64SliceWrapper{orig: orig, state: state}
 }
 
-func GenerateTestInt64Slice() Int64Slice {
-	orig := GenerateOrigTestInt64Slice()
-	return NewInt64Slice(&orig, NewState())
+func GenTestInt64SliceWrapper() Int64SliceWrapper {
+	orig := []int64{1, 2, 3}
+	return NewInt64SliceWrapper(&orig, NewState())
 }
 
-func CopyOrigInt64Slice(dst, src []int64) []int64 {
-	return append(dst[:0], src...)
-}
-
-func GenerateOrigTestInt64Slice() []int64 {
+func GenTestInt64Slice() []int64 {
 	return []int64{1, 2, 3}
 }
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalue.go
deleted file mode 100644
index 6150f448fa4..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalue.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	"fmt"
-	"sync"
-
-	otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-	"go.opentelemetry.io/collector/pdata/internal/json"
-	"go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
-	protoPoolKeyValue = sync.Pool{
-		New: func() any {
-			return &otlpcommon.KeyValue{}
-		},
-	}
-)
-
-func NewOrigKeyValue() *otlpcommon.KeyValue {
-	if !UseProtoPooling.IsEnabled() {
-		return &otlpcommon.KeyValue{}
-	}
-	return protoPoolKeyValue.Get().(*otlpcommon.KeyValue)
-}
-
-func DeleteOrigKeyValue(orig *otlpcommon.KeyValue, nullable bool) {
-	if orig == nil {
-		return
-	}
-
-	if !UseProtoPooling.IsEnabled() {
-		orig.Reset()
-		return
-	}
-
-	DeleteOrigAnyValue(&orig.Value, false)
-
-	orig.Reset()
-	if nullable {
-		protoPoolKeyValue.Put(orig)
-	}
-}
-
-func CopyOrigKeyValue(dest, src *otlpcommon.KeyValue) {
-	// If copying to same object, just return.
-	if src == dest {
-		return
-	}
-	dest.Key = src.Key
-	CopyOrigAnyValue(&dest.Value, &src.Value)
-}
-
-func GenTestOrigKeyValue() *otlpcommon.KeyValue {
-	orig := NewOrigKeyValue()
-	orig.Key = "test_key"
-	orig.Value = *GenTestOrigAnyValue()
-	return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigKeyValue(orig *otlpcommon.KeyValue, dest *json.Stream) {
-	dest.WriteObjectStart()
-	if orig.Key != "" {
-		dest.WriteObjectField("key")
-		dest.WriteString(orig.Key)
-	}
-	dest.WriteObjectField("value")
-	MarshalJSONOrigAnyValue(&orig.Value, dest)
-	dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigAttribute unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigKeyValue(orig *otlpcommon.KeyValue, iter *json.Iterator) {
-	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
-		switch f {
-		case "key":
-			orig.Key = iter.ReadString()
-		case "value":
-			UnmarshalJSONOrigAnyValue(&orig.Value, iter)
-		default:
-			iter.Skip()
-		}
-	}
-}
-
-func SizeProtoOrigKeyValue(orig *otlpcommon.KeyValue) int {
-	var n int
-	var l int
-	_ = l
-	l = len(orig.Key)
-	if l > 0 {
-		n += 1 + proto.Sov(uint64(l)) + l
-	}
-	l = SizeProtoOrigAnyValue(&orig.Value)
-	n += 1 + proto.Sov(uint64(l)) + l
-	return n
-}
-
-func MarshalProtoOrigKeyValue(orig *otlpcommon.KeyValue, buf []byte) int {
-	pos := len(buf)
-	var l int
-	_ = l
-	l = len(orig.Key)
-	if l > 0 {
-		pos -= l
-		copy(buf[pos:], orig.Key)
-		pos = proto.EncodeVarint(buf, pos, uint64(l))
-		pos--
-		buf[pos] = 0xa
-	}
-
-	l = MarshalProtoOrigAnyValue(&orig.Value, buf[:pos])
-	pos -= l
-	pos = proto.EncodeVarint(buf, pos, uint64(l))
-	pos--
-	buf[pos] = 0x12
-
-	return len(buf) - pos
-}
-
-func UnmarshalProtoOrigKeyValue(orig *otlpcommon.KeyValue, buf []byte) error {
-	var err error
-	var fieldNum int32
-	var wireType proto.WireType
-
-	l := len(buf)
-	pos := 0
-	for pos < l {
-		// If in a group parsing, move to the next tag.
-		fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
-		if err != nil {
-			return err
-		}
-		switch fieldNum {
-
-		case 1:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-			orig.Key = string(buf[startPos:pos])
-
-		case 2:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-
-			err = UnmarshalProtoOrigAnyValue(&orig.Value, buf[startPos:pos])
-			if err != nil {
-				return err
-			}
-		default:
-			pos, err = proto.ConsumeUnknown(buf, pos, wireType)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
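Note the asymmetry in the deleted KeyValue JSON marshaler: Key is written only when non-empty, but the value object is always written, since a non-pointer embedded message has no unset state to test. A tiny illustration of the resulting JSON shape, hand-built with encoding/json rather than the collector's json.Stream:

package main

import (
	"encoding/json"
	"fmt"
)

type anyValue struct {
	StringValue string `json:"stringValue,omitempty"`
}

type keyValue struct {
	Key   string   `json:"key,omitempty"`
	Value anyValue `json:"value"` // no omitempty: always present, as above
}

func main() {
	b, _ := json.Marshal(keyValue{Key: "host.name"})
	fmt.Println(string(b)) // {"key":"host.name","value":{}}
}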
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueandunit.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueandunit.go
deleted file mode 100644
index d8bccf5c8ba..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueandunit.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	"fmt"
-	"sync"
-
-	otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-	"go.opentelemetry.io/collector/pdata/internal/json"
-	"go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
-	protoPoolKeyValueAndUnit = sync.Pool{
-		New: func() any {
-			return &otlpprofiles.KeyValueAndUnit{}
-		},
-	}
-)
-
-func NewOrigKeyValueAndUnit() *otlpprofiles.KeyValueAndUnit {
-	if !UseProtoPooling.IsEnabled() {
-		return &otlpprofiles.KeyValueAndUnit{}
-	}
-	return protoPoolKeyValueAndUnit.Get().(*otlpprofiles.KeyValueAndUnit)
-}
-
-func DeleteOrigKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, nullable bool) {
-	if orig == nil {
-		return
-	}
-
-	if !UseProtoPooling.IsEnabled() {
-		orig.Reset()
-		return
-	}
-
-	DeleteOrigAnyValue(&orig.Value, false)
-
-	orig.Reset()
-	if nullable {
-		protoPoolKeyValueAndUnit.Put(orig)
-	}
-}
-
-func CopyOrigKeyValueAndUnit(dest, src *otlpprofiles.KeyValueAndUnit) {
-	// If copying to same object, just return.
-	if src == dest {
-		return
-	}
-	dest.KeyStrindex = src.KeyStrindex
-	CopyOrigAnyValue(&dest.Value, &src.Value)
-	dest.UnitStrindex = src.UnitStrindex
-}
-
-func GenTestOrigKeyValueAndUnit() *otlpprofiles.KeyValueAndUnit {
-	orig := NewOrigKeyValueAndUnit()
-	orig.KeyStrindex = int32(13)
-	orig.Value = *GenTestOrigAnyValue()
-	orig.UnitStrindex = int32(13)
-	return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, dest *json.Stream) {
-	dest.WriteObjectStart()
-	if orig.KeyStrindex != int32(0) {
-		dest.WriteObjectField("keyStrindex")
-		dest.WriteInt32(orig.KeyStrindex)
-	}
-	dest.WriteObjectField("value")
-	MarshalJSONOrigAnyValue(&orig.Value, dest)
-	if orig.UnitStrindex != int32(0) {
-		dest.WriteObjectField("unitStrindex")
-		dest.WriteInt32(orig.UnitStrindex)
-	}
-	dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigKeyValueAndUnit unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, iter *json.Iterator) {
-	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
-		switch f {
-		case "keyStrindex", "key_strindex":
-			orig.KeyStrindex = iter.ReadInt32()
-		case "value":
-			UnmarshalJSONOrigAnyValue(&orig.Value, iter)
-		case "unitStrindex", "unit_strindex":
-			orig.UnitStrindex = iter.ReadInt32()
-		default:
-			iter.Skip()
-		}
-	}
-}
-
-func SizeProtoOrigKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit) int {
-	var n int
-	var l int
-	_ = l
-	if orig.KeyStrindex != 0 {
-		n += 1 + proto.Sov(uint64(orig.KeyStrindex))
-	}
-	l = SizeProtoOrigAnyValue(&orig.Value)
-	n += 1 + proto.Sov(uint64(l)) + l
-	if orig.UnitStrindex != 0 {
-		n += 1 + proto.Sov(uint64(orig.UnitStrindex))
-	}
-	return n
-}
-
-func MarshalProtoOrigKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, buf []byte) int {
-	pos := len(buf)
-	var l int
-	_ = l
-	if orig.KeyStrindex != 0 {
-		pos = proto.EncodeVarint(buf, pos, uint64(orig.KeyStrindex))
-		pos--
-		buf[pos] = 0x8
-	}
-
-	l = MarshalProtoOrigAnyValue(&orig.Value, buf[:pos])
-	pos -= l
-	pos = proto.EncodeVarint(buf, pos, uint64(l))
-	pos--
-	buf[pos] = 0x12
-
-	if orig.UnitStrindex != 0 {
-		pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
-		pos--
-		buf[pos] = 0x18
-	}
-	return len(buf) - pos
-}
-
-func UnmarshalProtoOrigKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, buf []byte) error {
-	var err error
-	var fieldNum int32
-	var wireType proto.WireType
-
-	l := len(buf)
-	pos := 0
-	for pos < l {
-		// If in a group parsing, move to the next tag.
-		fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
-		if err != nil {
-			return err
-		}
-		switch fieldNum {
-
-		case 1:
-			if wireType != proto.WireTypeVarint {
-				return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType)
-			}
-			var num uint64
-			num, pos, err = proto.ConsumeVarint(buf, pos)
-			if err != nil {
-				return err
-			}
-
-			orig.KeyStrindex = int32(num)
-
-		case 2:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-
-			err = UnmarshalProtoOrigAnyValue(&orig.Value, buf[startPos:pos])
-			if err != nil {
-				return err
-			}
-
-		case 3:
-			if wireType != proto.WireTypeVarint {
-				return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
-			}
-			var num uint64
-			num, pos, err = proto.ConsumeVarint(buf, pos)
-			if err != nil {
-				return err
-			}
-
-			orig.UnitStrindex = int32(num)
-		default:
-			pos, err = proto.ConsumeUnknown(buf, pos, wireType)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
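All the deleted unmarshalers accept both spellings of each field name (e.g. "unitStrindex" and "unit_strindex"), matching the proto3 JSON mapping, which requires parsers to accept both the lowerCamelCase name and the original proto field name. A stand-in for that dual-key switch, with hypothetical field handling:

package main

import "fmt"

// setField mirrors the dual-key switches used by the deleted unmarshalers;
// unknown keys fall through, like iter.Skip() above.
func setField(m map[string]int32, key string, v int32) {
	switch key {
	case "keyStrindex", "key_strindex":
		m["key"] = v
	case "unitStrindex", "unit_strindex":
		m["unit"] = v
	}
}

func main() {
	m := map[string]int32{}
	setField(m, "key_strindex", 7) // snake_case accepted
	setField(m, "unitStrindex", 9) // camelCase accepted
	fmt.Println(m["key"], m["unit"]) // 7 9
}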
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueandunitslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueandunitslice.go
deleted file mode 100644
index 0a2a9109c96..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueandunitslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigKeyValueAndUnitSlice(dest, src []*otlpprofiles.KeyValueAndUnit) []*otlpprofiles.KeyValueAndUnit {
-	var newDest []*otlpprofiles.KeyValueAndUnit
-	if cap(dest) < len(src) {
-		newDest = make([]*otlpprofiles.KeyValueAndUnit, len(src))
-		// Copy old pointers to re-use.
-		copy(newDest, dest)
-		// Add new pointers for missing elements from len(dest) to len(srt).
-		for i := len(dest); i < len(src); i++ {
-			newDest[i] = NewOrigKeyValueAndUnit()
-		}
-	} else {
-		newDest = dest[:len(src)]
-		// Cleanup the rest of the elements so GC can free the memory.
-		// This can happen when len(src) < len(dest) < cap(dest).
-		for i := len(src); i < len(dest); i++ {
-			DeleteOrigKeyValueAndUnit(dest[i], true)
-			dest[i] = nil
-		}
-		// Add new pointers for missing elements.
-		// This can happen when len(dest) < len(src) < cap(dest).
-		for i := len(dest); i < len(src); i++ {
-			newDest[i] = NewOrigKeyValueAndUnit()
-		}
-	}
-	for i := range src {
-		CopyOrigKeyValueAndUnit(newDest[i], src[i])
-	}
-	return newDest
-}
-
-func GenerateOrigTestKeyValueAndUnitSlice() []*otlpprofiles.KeyValueAndUnit {
-	orig := make([]*otlpprofiles.KeyValueAndUnit, 5)
-	orig[0] = NewOrigKeyValueAndUnit()
-	orig[1] = GenTestOrigKeyValueAndUnit()
-	orig[2] = NewOrigKeyValueAndUnit()
-	orig[3] = GenTestOrigKeyValueAndUnit()
-	orig[4] = NewOrigKeyValueAndUnit()
-	return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvaluelist.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvaluelist.go
deleted file mode 100644
index eed254a6c23..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvaluelist.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
-	"fmt"
-	"sync"
-
-	otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-	"go.opentelemetry.io/collector/pdata/internal/json"
-	"go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
-	protoPoolKeyValueList = sync.Pool{
-		New: func() any {
-			return &otlpcommon.KeyValueList{}
-		},
-	}
-)
-
-func NewOrigKeyValueList() *otlpcommon.KeyValueList {
-	if !UseProtoPooling.IsEnabled() {
-		return &otlpcommon.KeyValueList{}
-	}
-	return protoPoolKeyValueList.Get().(*otlpcommon.KeyValueList)
-}
-
-func DeleteOrigKeyValueList(orig *otlpcommon.KeyValueList, nullable bool) {
-	if orig == nil {
-		return
-	}
-
-	if !UseProtoPooling.IsEnabled() {
-		orig.Reset()
-		return
-	}
-
-	for i := range orig.Values {
-		DeleteOrigKeyValue(&orig.Values[i], false)
-	}
-
-	orig.Reset()
-	if nullable {
-		protoPoolKeyValueList.Put(orig)
-	}
-}
-
-func CopyOrigKeyValueList(dest, src *otlpcommon.KeyValueList) {
-	// If copying to same object, just return.
-	if src == dest {
-		return
-	}
-	dest.Values = CopyOrigKeyValueSlice(dest.Values, src.Values)
-}
-
-func GenTestOrigKeyValueList() *otlpcommon.KeyValueList {
-	orig := NewOrigKeyValueList()
-	orig.Values = GenerateOrigTestKeyValueSlice()
-	return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigKeyValueList(orig *otlpcommon.KeyValueList, dest *json.Stream) {
-	dest.WriteObjectStart()
-	if len(orig.Values) > 0 {
-		dest.WriteObjectField("values")
-		dest.WriteArrayStart()
-		MarshalJSONOrigKeyValue(&orig.Values[0], dest)
-		for i := 1; i < len(orig.Values); i++ {
-			dest.WriteMore()
-			MarshalJSONOrigKeyValue(&orig.Values[i], dest)
-		}
-		dest.WriteArrayEnd()
-	}
-	dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigKeyValueList unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigKeyValueList(orig *otlpcommon.KeyValueList, iter *json.Iterator) {
-	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
-		switch f {
-		case "values":
-			for iter.ReadArray() {
-				orig.Values = append(orig.Values, otlpcommon.KeyValue{})
-				UnmarshalJSONOrigKeyValue(&orig.Values[len(orig.Values)-1], iter)
-			}
-
-		default:
-			iter.Skip()
-		}
-	}
-}
-
-func SizeProtoOrigKeyValueList(orig *otlpcommon.KeyValueList) int {
-	var n int
-	var l int
-	_ = l
-	for i := range orig.Values {
-		l = SizeProtoOrigKeyValue(&orig.Values[i])
-		n += 1 + proto.Sov(uint64(l)) + l
-	}
-	return n
-}
-
-func MarshalProtoOrigKeyValueList(orig *otlpcommon.KeyValueList, buf []byte) int {
-	pos := len(buf)
-	var l int
-	_ = l
-	for i := len(orig.Values) - 1; i >= 0; i-- {
-		l = MarshalProtoOrigKeyValue(&orig.Values[i], buf[:pos])
-		pos -= l
-		pos = proto.EncodeVarint(buf, pos, uint64(l))
-		pos--
-		buf[pos] = 0xa
-	}
-	return len(buf) - pos
-}
-
-func UnmarshalProtoOrigKeyValueList(orig *otlpcommon.KeyValueList, buf []byte) error {
-	var err error
-	var fieldNum int32
-	var wireType proto.WireType
-
-	l := len(buf)
-	pos := 0
-	for pos < l {
-		// If in a group parsing, move to the next tag.
-		fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
-		if err != nil {
-			return err
-		}
-		switch fieldNum {
-
-		case 1:
-			if wireType != proto.WireTypeLen {
-				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
-			}
-			var length int
-			length, pos, err = proto.ConsumeLen(buf, pos)
-			if err != nil {
-				return err
-			}
-			startPos := pos - length
-			orig.Values = append(orig.Values, otlpcommon.KeyValue{})
-			err = UnmarshalProtoOrigKeyValue(&orig.Values[len(orig.Values)-1], buf[startPos:pos])
-			if err != nil {
-				return err
-			}
-		default:
-			pos, err = proto.ConsumeUnknown(buf, pos, wireType)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
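The slice-copy helpers deleted throughout this diff come in two flavors: a value-element variant (CopyOrigKeyValueSlice, just below) that only truncates and clears, and a pointer-element variant (FunctionSlice, HistogramDataPointSlice, LineSlice, LinkSlice, ...) that additionally recycles surplus pointers and allocates missing ones. A condensed sketch of the pointer-element reuse logic, under a hypothetical element type:

package main

import "fmt"

type elem struct{ n int }

func newElem() *elem          { return &elem{} }
func freeElem(e *elem)        { *e = elem{} } // stand-in for DeleteOrig*(e, true)
func copyElem(dst, src *elem) { dst.n = src.n }

// copySlice mirrors the deleted CopyOrig*Slice helpers: grow when capacity
// is short, otherwise reslice, free the surplus, and fill any gap.
func copySlice(dest, src []*elem) []*elem {
	var out []*elem
	if cap(dest) < len(src) {
		out = make([]*elem, len(src))
		copy(out, dest) // re-use the old pointers
		for i := len(dest); i < len(src); i++ {
			out[i] = newElem()
		}
	} else {
		out = dest[:len(src)]
		for i := len(src); i < len(dest); i++ { // len(src) < len(dest) <= cap
			freeElem(dest[i])
			dest[i] = nil
		}
		for i := len(dest); i < len(src); i++ { // len(dest) < len(src) <= cap
			out[i] = newElem()
		}
	}
	for i := range src {
		copyElem(out[i], src[i])
	}
	return out
}

func main() {
	dst := []*elem{{1}, {2}, {3}}
	dst = copySlice(dst, []*elem{{9}})
	fmt.Println(len(dst), dst[0].n) // 1 9
}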
- for i := len(src); i < len(dest); i++ { - DeleteOrigKeyValue(&dest[i], false) - } - } - for i := range src { - CopyOrigKeyValue(&newDest[i], &src[i]) - } - return newDest -} - -func GenerateOrigTestKeyValueSlice() []otlpcommon.KeyValue { - orig := make([]otlpcommon.KeyValue, 5) - orig[1] = *GenTestOrigKeyValue() - orig[3] = *GenTestOrigKeyValue() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_lineslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_lineslice.go deleted file mode 100644 index e43c1ababd4..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_lineslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigLineSlice(dest, src []*otlpprofiles.Line) []*otlpprofiles.Line { - var newDest []*otlpprofiles.Line - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.Line, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigLine() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigLine(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigLine() - } - } - for i := range src { - CopyOrigLine(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestLineSlice() []*otlpprofiles.Line { - orig := make([]*otlpprofiles.Line, 5) - orig[0] = NewOrigLine() - orig[1] = GenTestOrigLine() - orig[2] = NewOrigLine() - orig[3] = GenTestOrigLine() - orig[4] = NewOrigLine() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_link.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_link.go deleted file mode 100644 index 6fd64397270..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_link.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
- -package internal - -import ( - "fmt" - "sync" - - "go.opentelemetry.io/collector/pdata/internal/data" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolLink = sync.Pool{ - New: func() any { - return &otlpprofiles.Link{} - }, - } -) - -func NewOrigLink() *otlpprofiles.Link { - if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.Link{} - } - return protoPoolLink.Get().(*otlpprofiles.Link) -} - -func DeleteOrigLink(orig *otlpprofiles.Link, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigTraceID(&orig.TraceId, false) - DeleteOrigSpanID(&orig.SpanId, false) - - orig.Reset() - if nullable { - protoPoolLink.Put(orig) - } -} - -func CopyOrigLink(dest, src *otlpprofiles.Link) { - // If copying to same object, just return. - if src == dest { - return - } - dest.TraceId = src.TraceId - dest.SpanId = src.SpanId -} - -func GenTestOrigLink() *otlpprofiles.Link { - orig := NewOrigLink() - orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) - orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigLink(orig *otlpprofiles.Link, dest *json.Stream) { - dest.WriteObjectStart() - if orig.TraceId != data.TraceID([16]byte{}) { - dest.WriteObjectField("traceId") - MarshalJSONOrigTraceID(&orig.TraceId, dest) - } - if orig.SpanId != data.SpanID([8]byte{}) { - dest.WriteObjectField("spanId") - MarshalJSONOrigSpanID(&orig.SpanId, dest) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigLink unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigLink(orig *otlpprofiles.Link, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "traceId", "trace_id": - UnmarshalJSONOrigTraceID(&orig.TraceId, iter) - case "spanId", "span_id": - UnmarshalJSONOrigSpanID(&orig.SpanId, iter) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigLink(orig *otlpprofiles.Link) int { - var n int - var l int - _ = l - l = SizeProtoOrigTraceID(&orig.TraceId) - n += 1 + proto.Sov(uint64(l)) + l - l = SizeProtoOrigSpanID(&orig.SpanId) - n += 1 + proto.Sov(uint64(l)) + l - return n -} - -func MarshalProtoOrigLink(orig *otlpprofiles.Link, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - - return len(buf) - pos -} - -func UnmarshalProtoOrigLink(orig *otlpprofiles.Link, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_linkslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_linkslice.go deleted file mode 100644 index 27b981ec171..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_linkslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigLinkSlice(dest, src []*otlpprofiles.Link) []*otlpprofiles.Link { - var newDest []*otlpprofiles.Link - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.Link, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigLink() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigLink(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigLink() - } - } - for i := range src { - CopyOrigLink(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestLinkSlice() []*otlpprofiles.Link { - orig := make([]*otlpprofiles.Link, 5) - orig[0] = NewOrigLink() - orig[1] = GenTestOrigLink() - orig[2] = NewOrigLink() - orig[3] = GenTestOrigLink() - orig[4] = NewOrigLink() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_locationslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_locationslice.go deleted file mode 100644 index 83a10cc457b..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_locationslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
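// [Editor's note] CopyOrigLinkSlice, CopyOrigLocationSlice and the other
// CopyOrig*Slice helpers deleted in this diff are instantiations of one
// capacity-aware copy. A hedged sketch with a hypothetical elem type and
// newElem/deleteElem/copyElem standing in for the generated helpers:

package sketch

type elem struct{ value int }

func newElem() *elem           { return &elem{} }
func deleteElem(e *elem)       { *e = elem{} } // the real code may also pool e
func copyElem(dest, src *elem) { dest.value = src.value }

func copyElemSlice(dest, src []*elem) []*elem {
	var newDest []*elem
	if cap(dest) < len(src) {
		// Too small: allocate, re-use the old pointers, fill in the rest.
		newDest = make([]*elem, len(src))
		copy(newDest, dest)
		for i := len(dest); i < len(src); i++ {
			newDest[i] = newElem()
		}
	} else {
		newDest = dest[:len(src)]
		// Release trailing elements (len(src) < len(dest) < cap(dest)) so the
		// GC can reclaim them, then allocate any missing elements
		// (len(dest) < len(src) <= cap(dest)).
		for i := len(src); i < len(dest); i++ {
			deleteElem(dest[i])
			dest[i] = nil
		}
		for i := len(dest); i < len(src); i++ {
			newDest[i] = newElem()
		}
	}
	for i := range src {
		copyElem(newDest[i], src[i])
	}
	return newDest
}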
- -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigLocationSlice(dest, src []*otlpprofiles.Location) []*otlpprofiles.Location { - var newDest []*otlpprofiles.Location - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.Location, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigLocation() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigLocation(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigLocation() - } - } - for i := range src { - CopyOrigLocation(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestLocationSlice() []*otlpprofiles.Location { - orig := make([]*otlpprofiles.Location, 5) - orig[0] = NewOrigLocation() - orig[1] = GenTestOrigLocation() - orig[2] = NewOrigLocation() - orig[3] = GenTestOrigLocation() - orig[4] = NewOrigLocation() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecordslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecordslice.go deleted file mode 100644 index 12770cf5887..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecordslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" -) - -func CopyOrigLogRecordSlice(dest, src []*otlplogs.LogRecord) []*otlplogs.LogRecord { - var newDest []*otlplogs.LogRecord - if cap(dest) < len(src) { - newDest = make([]*otlplogs.LogRecord, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigLogRecord() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigLogRecord(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigLogRecord() - } - } - for i := range src { - CopyOrigLogRecord(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestLogRecordSlice() []*otlplogs.LogRecord { - orig := make([]*otlplogs.LogRecord, 5) - orig[0] = NewOrigLogRecord() - orig[1] = GenTestOrigLogRecord() - orig[2] = NewOrigLogRecord() - orig[3] = GenTestOrigLogRecord() - orig[4] = NewOrigLogRecord() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mappingslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mappingslice.go deleted file mode 100644 index fa6e5bd416a..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mappingslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigMappingSlice(dest, src []*otlpprofiles.Mapping) []*otlpprofiles.Mapping { - var newDest []*otlpprofiles.Mapping - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.Mapping, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigMapping() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigMapping(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigMapping() - } - } - for i := range src { - CopyOrigMapping(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestMappingSlice() []*otlpprofiles.Mapping { - orig := make([]*otlpprofiles.Mapping, 5) - orig[0] = NewOrigMapping() - orig[1] = GenTestOrigMapping() - orig[2] = NewOrigMapping() - orig[3] = GenTestOrigMapping() - orig[4] = NewOrigMapping() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metric.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metric.go deleted file mode 100644 index 6dee89bdbbc..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metric.go +++ /dev/null @@ -1,635 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
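// [Editor's note] The GenerateOrigTest*Slice helpers in this diff always
// build five elements and fully populate only the odd indexes, so copy and
// marshal round-trips exercise zero-value and populated entries side by
// side. A self-contained sketch with a hypothetical item type:

package sketch

type item struct{ value int }

func generateTestItemSlice() []*item {
	orig := make([]*item, 5)
	for i := range orig {
		orig[i] = &item{}
		if i%2 == 1 {
			orig[i].value = 42 // stands in for the GenTestOrig* helpers
		}
	}
	return orig
}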
- -package internal - -import ( - "fmt" - "sync" - - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolMetric = sync.Pool{ - New: func() any { - return &otlpmetrics.Metric{} - }, - } - - ProtoPoolMetric_Gauge = sync.Pool{ - New: func() any { - return &otlpmetrics.Metric_Gauge{} - }, - } - - ProtoPoolMetric_Sum = sync.Pool{ - New: func() any { - return &otlpmetrics.Metric_Sum{} - }, - } - - ProtoPoolMetric_Histogram = sync.Pool{ - New: func() any { - return &otlpmetrics.Metric_Histogram{} - }, - } - - ProtoPoolMetric_ExponentialHistogram = sync.Pool{ - New: func() any { - return &otlpmetrics.Metric_ExponentialHistogram{} - }, - } - - ProtoPoolMetric_Summary = sync.Pool{ - New: func() any { - return &otlpmetrics.Metric_Summary{} - }, - } -) - -func NewOrigMetric() *otlpmetrics.Metric { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.Metric{} - } - return protoPoolMetric.Get().(*otlpmetrics.Metric) -} - -func DeleteOrigMetric(orig *otlpmetrics.Metric, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - switch ov := orig.Data.(type) { - case *otlpmetrics.Metric_Gauge: - DeleteOrigGauge(ov.Gauge, true) - ov.Gauge = nil - ProtoPoolMetric_Gauge.Put(ov) - case *otlpmetrics.Metric_Sum: - DeleteOrigSum(ov.Sum, true) - ov.Sum = nil - ProtoPoolMetric_Sum.Put(ov) - case *otlpmetrics.Metric_Histogram: - DeleteOrigHistogram(ov.Histogram, true) - ov.Histogram = nil - ProtoPoolMetric_Histogram.Put(ov) - case *otlpmetrics.Metric_ExponentialHistogram: - DeleteOrigExponentialHistogram(ov.ExponentialHistogram, true) - ov.ExponentialHistogram = nil - ProtoPoolMetric_ExponentialHistogram.Put(ov) - case *otlpmetrics.Metric_Summary: - DeleteOrigSummary(ov.Summary, true) - ov.Summary = nil - ProtoPoolMetric_Summary.Put(ov) - - } - for i := range orig.Metadata { - DeleteOrigKeyValue(&orig.Metadata[i], false) - } - - orig.Reset() - if nullable { - protoPoolMetric.Put(orig) - } -} - -func CopyOrigMetric(dest, src *otlpmetrics.Metric) { - // If copying to same object, just return. 
- if src == dest { - return - } - dest.Name = src.Name - dest.Description = src.Description - dest.Unit = src.Unit - switch t := src.Data.(type) { - case *otlpmetrics.Metric_Gauge: - var ov *otlpmetrics.Metric_Gauge - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Gauge{} - } else { - ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge) - } - ov.Gauge = NewOrigGauge() - CopyOrigGauge(ov.Gauge, t.Gauge) - dest.Data = ov - case *otlpmetrics.Metric_Sum: - var ov *otlpmetrics.Metric_Sum - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Sum{} - } else { - ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum) - } - ov.Sum = NewOrigSum() - CopyOrigSum(ov.Sum, t.Sum) - dest.Data = ov - case *otlpmetrics.Metric_Histogram: - var ov *otlpmetrics.Metric_Histogram - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Histogram{} - } else { - ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram) - } - ov.Histogram = NewOrigHistogram() - CopyOrigHistogram(ov.Histogram, t.Histogram) - dest.Data = ov - case *otlpmetrics.Metric_ExponentialHistogram: - var ov *otlpmetrics.Metric_ExponentialHistogram - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_ExponentialHistogram{} - } else { - ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram) - } - ov.ExponentialHistogram = NewOrigExponentialHistogram() - CopyOrigExponentialHistogram(ov.ExponentialHistogram, t.ExponentialHistogram) - dest.Data = ov - case *otlpmetrics.Metric_Summary: - var ov *otlpmetrics.Metric_Summary - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Summary{} - } else { - ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary) - } - ov.Summary = NewOrigSummary() - CopyOrigSummary(ov.Summary, t.Summary) - dest.Data = ov - } - dest.Metadata = CopyOrigKeyValueSlice(dest.Metadata, src.Metadata) -} - -func GenTestOrigMetric() *otlpmetrics.Metric { - orig := NewOrigMetric() - orig.Name = "test_name" - orig.Description = "test_description" - orig.Unit = "test_unit" - orig.Data = &otlpmetrics.Metric_Sum{Sum: GenTestOrigSum()} - orig.Metadata = GenerateOrigTestKeyValueSlice() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
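// [Editor's note] Metric.Data is a protobuf oneof, which is why CopyOrigMetric
// above switches on the concrete wrapper type and allocates a fresh variant
// (from a per-variant sync.Pool when pooling is on) rather than sharing the
// source pointer. A reduced two-variant sketch without pooling:

package sketch

type metricData interface{ isMetricData() }

type gauge struct{ value float64 }
type sum struct{ value float64 }

type metricGauge struct{ gauge *gauge }
type metricSum struct{ sum *sum }

func (*metricGauge) isMetricData() {}
func (*metricSum) isMetricData()   {}

func copyMetricData(src metricData) metricData {
	switch t := src.(type) {
	case *metricGauge:
		g := *t.gauge // deep-copy the payload, never alias it
		return &metricGauge{gauge: &g}
	case *metricSum:
		s := *t.sum
		return &metricSum{sum: &s}
	default:
		return nil // an unset oneof stays unset
	}
}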
-func MarshalJSONOrigMetric(orig *otlpmetrics.Metric, dest *json.Stream) { - dest.WriteObjectStart() - if orig.Name != "" { - dest.WriteObjectField("name") - dest.WriteString(orig.Name) - } - if orig.Description != "" { - dest.WriteObjectField("description") - dest.WriteString(orig.Description) - } - if orig.Unit != "" { - dest.WriteObjectField("unit") - dest.WriteString(orig.Unit) - } - switch orig := orig.Data.(type) { - case *otlpmetrics.Metric_Gauge: - if orig.Gauge != nil { - dest.WriteObjectField("gauge") - MarshalJSONOrigGauge(orig.Gauge, dest) - } - case *otlpmetrics.Metric_Sum: - if orig.Sum != nil { - dest.WriteObjectField("sum") - MarshalJSONOrigSum(orig.Sum, dest) - } - case *otlpmetrics.Metric_Histogram: - if orig.Histogram != nil { - dest.WriteObjectField("histogram") - MarshalJSONOrigHistogram(orig.Histogram, dest) - } - case *otlpmetrics.Metric_ExponentialHistogram: - if orig.ExponentialHistogram != nil { - dest.WriteObjectField("exponentialHistogram") - MarshalJSONOrigExponentialHistogram(orig.ExponentialHistogram, dest) - } - case *otlpmetrics.Metric_Summary: - if orig.Summary != nil { - dest.WriteObjectField("summary") - MarshalJSONOrigSummary(orig.Summary, dest) - } - } - if len(orig.Metadata) > 0 { - dest.WriteObjectField("metadata") - dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Metadata[0], dest) - for i := 1; i < len(orig.Metadata); i++ { - dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Metadata[i], dest) - } - dest.WriteArrayEnd() - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigMetric unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigMetric(orig *otlpmetrics.Metric, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "name": - orig.Name = iter.ReadString() - case "description": - orig.Description = iter.ReadString() - case "unit": - orig.Unit = iter.ReadString() - - case "gauge": - { - var ov *otlpmetrics.Metric_Gauge - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Gauge{} - } else { - ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge) - } - ov.Gauge = NewOrigGauge() - UnmarshalJSONOrigGauge(ov.Gauge, iter) - orig.Data = ov - } - - case "sum": - { - var ov *otlpmetrics.Metric_Sum - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Sum{} - } else { - ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum) - } - ov.Sum = NewOrigSum() - UnmarshalJSONOrigSum(ov.Sum, iter) - orig.Data = ov - } - - case "histogram": - { - var ov *otlpmetrics.Metric_Histogram - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Histogram{} - } else { - ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram) - } - ov.Histogram = NewOrigHistogram() - UnmarshalJSONOrigHistogram(ov.Histogram, iter) - orig.Data = ov - } - - case "exponentialHistogram", "exponential_histogram": - { - var ov *otlpmetrics.Metric_ExponentialHistogram - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_ExponentialHistogram{} - } else { - ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram) - } - ov.ExponentialHistogram = NewOrigExponentialHistogram() - UnmarshalJSONOrigExponentialHistogram(ov.ExponentialHistogram, iter) - orig.Data = ov - } - - case "summary": - { - var ov *otlpmetrics.Metric_Summary - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Summary{} - } else { - ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary) - } - ov.Summary = NewOrigSummary() - 
UnmarshalJSONOrigSummary(ov.Summary, iter) - orig.Data = ov - } - - case "metadata": - for iter.ReadArray() { - orig.Metadata = append(orig.Metadata, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Metadata[len(orig.Metadata)-1], iter) - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigMetric(orig *otlpmetrics.Metric) int { - var n int - var l int - _ = l - l = len(orig.Name) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.Description) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.Unit) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - switch orig := orig.Data.(type) { - case nil: - _ = orig - break - case *otlpmetrics.Metric_Gauge: - l = SizeProtoOrigGauge(orig.Gauge) - n += 1 + proto.Sov(uint64(l)) + l - case *otlpmetrics.Metric_Sum: - l = SizeProtoOrigSum(orig.Sum) - n += 1 + proto.Sov(uint64(l)) + l - case *otlpmetrics.Metric_Histogram: - l = SizeProtoOrigHistogram(orig.Histogram) - n += 1 + proto.Sov(uint64(l)) + l - case *otlpmetrics.Metric_ExponentialHistogram: - l = SizeProtoOrigExponentialHistogram(orig.ExponentialHistogram) - n += 1 + proto.Sov(uint64(l)) + l - case *otlpmetrics.Metric_Summary: - l = SizeProtoOrigSummary(orig.Summary) - n += 1 + proto.Sov(uint64(l)) + l - } - for i := range orig.Metadata { - l = SizeProtoOrigKeyValue(&orig.Metadata[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigMetric(orig *otlpmetrics.Metric, buf []byte) int { - pos := len(buf) - var l int - _ = l - l = len(orig.Name) - if l > 0 { - pos -= l - copy(buf[pos:], orig.Name) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - l = len(orig.Description) - if l > 0 { - pos -= l - copy(buf[pos:], orig.Description) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.Unit) - if l > 0 { - pos -= l - copy(buf[pos:], orig.Unit) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - switch orig := orig.Data.(type) { - case *otlpmetrics.Metric_Gauge: - - l = MarshalProtoOrigGauge(orig.Gauge, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x2a - - case *otlpmetrics.Metric_Sum: - - l = MarshalProtoOrigSum(orig.Sum, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x3a - - case *otlpmetrics.Metric_Histogram: - - l = MarshalProtoOrigHistogram(orig.Histogram, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x4a - - case *otlpmetrics.Metric_ExponentialHistogram: - - l = MarshalProtoOrigExponentialHistogram(orig.ExponentialHistogram, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x52 - - case *otlpmetrics.Metric_Summary: - - l = MarshalProtoOrigSummary(orig.Summary, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x5a - - } - for i := len(orig.Metadata) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Metadata[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x62 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigMetric(orig *otlpmetrics.Metric, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Name = string(buf[startPos:pos]) - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Description = string(buf[startPos:pos]) - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Unit = string(buf[startPos:pos]) - - case 5: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpmetrics.Metric_Gauge - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Gauge{} - } else { - ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge) - } - ov.Gauge = NewOrigGauge() - err = UnmarshalProtoOrigGauge(ov.Gauge, buf[startPos:pos]) - if err != nil { - return err - } - orig.Data = ov - - case 7: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpmetrics.Metric_Sum - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Sum{} - } else { - ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum) - } - ov.Sum = NewOrigSum() - err = UnmarshalProtoOrigSum(ov.Sum, buf[startPos:pos]) - if err != nil { - return err - } - orig.Data = ov - - case 9: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpmetrics.Metric_Histogram - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Histogram{} - } else { - ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram) - } - ov.Histogram = NewOrigHistogram() - err = UnmarshalProtoOrigHistogram(ov.Histogram, buf[startPos:pos]) - if err != nil { - return err - } - orig.Data = ov - - case 10: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpmetrics.Metric_ExponentialHistogram - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_ExponentialHistogram{} - } else { - ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram) - } - ov.ExponentialHistogram = NewOrigExponentialHistogram() - err = UnmarshalProtoOrigExponentialHistogram(ov.ExponentialHistogram, buf[startPos:pos]) - if err != nil { - return err - } - orig.Data = ov - - case 11: - if wireType != proto.WireTypeLen { - return 
fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - var ov *otlpmetrics.Metric_Summary - if !UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Summary{} - } else { - ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary) - } - ov.Summary = NewOrigSummary() - err = UnmarshalProtoOrigSummary(ov.Summary, buf[startPos:pos]) - if err != nil { - return err - } - orig.Data = ov - - case 12: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Metadata = append(orig.Metadata, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Metadata[len(orig.Metadata)-1], buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metricslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metricslice.go deleted file mode 100644 index b6b82452dc9..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metricslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -func CopyOrigMetricSlice(dest, src []*otlpmetrics.Metric) []*otlpmetrics.Metric { - var newDest []*otlpmetrics.Metric - if cap(dest) < len(src) { - newDest = make([]*otlpmetrics.Metric, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigMetric() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigMetric(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigMetric() - } - } - for i := range src { - CopyOrigMetric(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestMetricSlice() []*otlpmetrics.Metric { - orig := make([]*otlpmetrics.Metric, 5) - orig[0] = NewOrigMetric() - orig[1] = GenTestOrigMetric() - orig[2] = NewOrigMetric() - orig[3] = GenTestOrigMetric() - orig[4] = NewOrigMetric() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapointslice.go deleted file mode 100644 index 3ac7b17ab52..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapointslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. 
-// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -func CopyOrigNumberDataPointSlice(dest, src []*otlpmetrics.NumberDataPoint) []*otlpmetrics.NumberDataPoint { - var newDest []*otlpmetrics.NumberDataPoint - if cap(dest) < len(src) { - newDest = make([]*otlpmetrics.NumberDataPoint, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigNumberDataPoint() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigNumberDataPoint(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigNumberDataPoint() - } - } - for i := range src { - CopyOrigNumberDataPoint(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestNumberDataPointSlice() []*otlpmetrics.NumberDataPoint { - orig := make([]*otlpmetrics.NumberDataPoint, 5) - orig[0] = NewOrigNumberDataPoint() - orig[1] = GenTestOrigNumberDataPoint() - orig[2] = NewOrigNumberDataPoint() - orig[3] = GenTestOrigNumberDataPoint() - orig[4] = NewOrigNumberDataPoint() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go new file mode 100644 index 00000000000..7bece6d028b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +type ProfilesDataWrapper struct { + orig *ProfilesData + state *State +} + +func GetProfilesDataOrig(ms ProfilesDataWrapper) *ProfilesData { + return ms.orig +} + +func GetProfilesDataState(ms ProfilesDataWrapper) *State { + return ms.state +} + +func NewProfilesDataWrapper(orig *ProfilesData, state *State) ProfilesDataWrapper { + return ProfilesDataWrapper{orig: orig, state: state} +} + +func GenTestProfilesDataWrapper() ProfilesDataWrapper { + return NewProfilesDataWrapper(GenTestProfilesData(), NewState()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profileslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profileslice.go deleted file mode 100644 index 84ec005150f..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profileslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
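// [Editor's note] This diff swaps the old internal struct-plus-accessor
// shape (see the Resource rewrite below) for explicit *Wrapper types such as
// ProfilesDataWrapper above: a pairing of the orig message with a shared
// State that tracks mutability. The generated code spells each wrapper out
// per type; a speculative generic sketch of the common shape, with a
// stand-in state type:

package sketch

type state struct{ readOnly bool }

type wrapper[T any] struct {
	orig *T
	st   *state
}

func newWrapper[T any](orig *T, st *state) wrapper[T] {
	return wrapper[T]{orig: orig, st: st}
}

func (w wrapper[T]) Orig() *T      { return w.orig }
func (w wrapper[T]) State() *state { return w.st }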
- -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigProfileSlice(dest, src []*otlpprofiles.Profile) []*otlpprofiles.Profile { - var newDest []*otlpprofiles.Profile - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.Profile, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigProfile() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigProfile(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigProfile() - } - } - for i := range src { - CopyOrigProfile(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestProfileSlice() []*otlpprofiles.Profile { - orig := make([]*otlpprofiles.Profile, 5) - orig[0] = NewOrigProfile() - orig[1] = GenTestOrigProfile() - orig[2] = NewOrigProfile() - orig[3] = GenTestOrigProfile() - orig[4] = NewOrigProfile() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go index 6e45dbdec69..1d6cabfebfd 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go @@ -6,251 +6,23 @@ package internal -import ( - "fmt" - "sync" - - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -type Resource struct { - orig *otlpresource.Resource +type ResourceWrapper struct { + orig *Resource state *State } -func GetOrigResource(ms Resource) *otlpresource.Resource { +func GetResourceOrig(ms ResourceWrapper) *Resource { return ms.orig } -func GetResourceState(ms Resource) *State { +func GetResourceState(ms ResourceWrapper) *State { return ms.state } -func NewResource(orig *otlpresource.Resource, state *State) Resource { - return Resource{orig: orig, state: state} -} - -var ( - protoPoolResource = sync.Pool{ - New: func() any { - return &otlpresource.Resource{} - }, - } -) - -func NewOrigResource() *otlpresource.Resource { - if !UseProtoPooling.IsEnabled() { - return &otlpresource.Resource{} - } - return protoPoolResource.Get().(*otlpresource.Resource) +func NewResourceWrapper(orig *Resource, state *State) ResourceWrapper { + return ResourceWrapper{orig: orig, state: state} } -func DeleteOrigResource(orig *otlpresource.Resource, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.Attributes { - DeleteOrigKeyValue(&orig.Attributes[i], false) - } - for i := range orig.EntityRefs { - DeleteOrigEntityRef(orig.EntityRefs[i], true) - } - - orig.Reset() - if nullable { - protoPoolResource.Put(orig) - } -} - -func CopyOrigResource(dest, src *otlpresource.Resource) { - // If copying to same object, just return. 
- if src == dest { - return - } - dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes) - dest.DroppedAttributesCount = src.DroppedAttributesCount - dest.EntityRefs = CopyOrigEntityRefSlice(dest.EntityRefs, src.EntityRefs) -} - -func GenTestOrigResource() *otlpresource.Resource { - orig := NewOrigResource() - orig.Attributes = GenerateOrigTestKeyValueSlice() - orig.DroppedAttributesCount = uint32(13) - orig.EntityRefs = GenerateOrigTestEntityRefSlice() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigResource(orig *otlpresource.Resource, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.Attributes) > 0 { - dest.WriteObjectField("attributes") - dest.WriteArrayStart() - MarshalJSONOrigKeyValue(&orig.Attributes[0], dest) - for i := 1; i < len(orig.Attributes); i++ { - dest.WriteMore() - MarshalJSONOrigKeyValue(&orig.Attributes[i], dest) - } - dest.WriteArrayEnd() - } - if orig.DroppedAttributesCount != uint32(0) { - dest.WriteObjectField("droppedAttributesCount") - dest.WriteUint32(orig.DroppedAttributesCount) - } - if len(orig.EntityRefs) > 0 { - dest.WriteObjectField("entityRefs") - dest.WriteArrayStart() - MarshalJSONOrigEntityRef(orig.EntityRefs[0], dest) - for i := 1; i < len(orig.EntityRefs); i++ { - dest.WriteMore() - MarshalJSONOrigEntityRef(orig.EntityRefs[i], dest) - } - dest.WriteArrayEnd() - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigResource unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigResource(orig *otlpresource.Resource, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "attributes": - for iter.ReadArray() { - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter) - } - - case "droppedAttributesCount", "dropped_attributes_count": - orig.DroppedAttributesCount = iter.ReadUint32() - case "entityRefs", "entity_refs": - for iter.ReadArray() { - orig.EntityRefs = append(orig.EntityRefs, NewOrigEntityRef()) - UnmarshalJSONOrigEntityRef(orig.EntityRefs[len(orig.EntityRefs)-1], iter) - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigResource(orig *otlpresource.Resource) int { - var n int - var l int - _ = l - for i := range orig.Attributes { - l = SizeProtoOrigKeyValue(&orig.Attributes[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - if orig.DroppedAttributesCount != 0 { - n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount)) - } - for i := range orig.EntityRefs { - l = SizeProtoOrigEntityRef(orig.EntityRefs[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigResource(orig *otlpresource.Resource, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.Attributes) - 1; i >= 0; i-- { - l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - if orig.DroppedAttributesCount != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount)) - pos-- - buf[pos] = 0x10 - } - for i := len(orig.EntityRefs) - 1; i >= 0; i-- { - l = MarshalProtoOrigEntityRef(orig.EntityRefs[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigResource(orig *otlpresource.Resource, buf []byte) error { - var err error - var fieldNum 
int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{}) - err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.DroppedAttributesCount = uint32(num) - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.EntityRefs = append(orig.EntityRefs, NewOrigEntityRef()) - err = UnmarshalProtoOrigEntityRef(orig.EntityRefs[len(orig.EntityRefs)-1], buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil +func GenTestResourceWrapper() ResourceWrapper { + return NewResourceWrapper(GenTestResource(), NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogs.go deleted file mode 100644 index 4c78e1246a6..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogs.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolResourceLogs = sync.Pool{ - New: func() any { - return &otlplogs.ResourceLogs{} - }, - } -) - -func NewOrigResourceLogs() *otlplogs.ResourceLogs { - if !UseProtoPooling.IsEnabled() { - return &otlplogs.ResourceLogs{} - } - return protoPoolResourceLogs.Get().(*otlplogs.ResourceLogs) -} - -func DeleteOrigResourceLogs(orig *otlplogs.ResourceLogs, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigResource(&orig.Resource, false) - for i := range orig.ScopeLogs { - DeleteOrigScopeLogs(orig.ScopeLogs[i], true) - } - - orig.Reset() - if nullable { - protoPoolResourceLogs.Put(orig) - } -} - -func CopyOrigResourceLogs(dest, src *otlplogs.ResourceLogs) { - // If copying to same object, just return. 
- if src == dest { - return - } - CopyOrigResource(&dest.Resource, &src.Resource) - dest.ScopeLogs = CopyOrigScopeLogsSlice(dest.ScopeLogs, src.ScopeLogs) - dest.SchemaUrl = src.SchemaUrl -} - -func GenTestOrigResourceLogs() *otlplogs.ResourceLogs { - orig := NewOrigResourceLogs() - orig.Resource = *GenTestOrigResource() - orig.ScopeLogs = GenerateOrigTestScopeLogsSlice() - orig.SchemaUrl = "test_schemaurl" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigResourceLogs(orig *otlplogs.ResourceLogs, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("resource") - MarshalJSONOrigResource(&orig.Resource, dest) - if len(orig.ScopeLogs) > 0 { - dest.WriteObjectField("scopeLogs") - dest.WriteArrayStart() - MarshalJSONOrigScopeLogs(orig.ScopeLogs[0], dest) - for i := 1; i < len(orig.ScopeLogs); i++ { - dest.WriteMore() - MarshalJSONOrigScopeLogs(orig.ScopeLogs[i], dest) - } - dest.WriteArrayEnd() - } - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigResourceLogs unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigResourceLogs(orig *otlplogs.ResourceLogs, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "resource": - UnmarshalJSONOrigResource(&orig.Resource, iter) - case "scopeLogs", "scope_logs": - for iter.ReadArray() { - orig.ScopeLogs = append(orig.ScopeLogs, NewOrigScopeLogs()) - UnmarshalJSONOrigScopeLogs(orig.ScopeLogs[len(orig.ScopeLogs)-1], iter) - } - - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigResourceLogs(orig *otlplogs.ResourceLogs) int { - var n int - var l int - _ = l - l = SizeProtoOrigResource(&orig.Resource) - n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.ScopeLogs { - l = SizeProtoOrigScopeLogs(orig.ScopeLogs[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigResourceLogs(orig *otlplogs.ResourceLogs, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigResource(&orig.Resource, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - for i := len(orig.ScopeLogs) - 1; i >= 0; i-- { - l = MarshalProtoOrigScopeLogs(orig.ScopeLogs[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigResourceLogs(orig *otlplogs.ResourceLogs, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigResource(&orig.Resource, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ScopeLogs = append(orig.ScopeLogs, NewOrigScopeLogs()) - err = UnmarshalProtoOrigScopeLogs(orig.ScopeLogs[len(orig.ScopeLogs)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogsslice.go deleted file mode 100644 index dcf0a0afb08..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogsslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" -) - -func CopyOrigResourceLogsSlice(dest, src []*otlplogs.ResourceLogs) []*otlplogs.ResourceLogs { - var newDest []*otlplogs.ResourceLogs - if cap(dest) < len(src) { - newDest = make([]*otlplogs.ResourceLogs, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigResourceLogs() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigResourceLogs(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigResourceLogs() - } - } - for i := range src { - CopyOrigResourceLogs(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestResourceLogsSlice() []*otlplogs.ResourceLogs { - orig := make([]*otlplogs.ResourceLogs, 5) - orig[0] = NewOrigResourceLogs() - orig[1] = GenTestOrigResourceLogs() - orig[2] = NewOrigResourceLogs() - orig[3] = GenTestOrigResourceLogs() - orig[4] = NewOrigResourceLogs() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetrics.go deleted file mode 100644 index a412cfee854..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetrics.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolResourceMetrics = sync.Pool{ - New: func() any { - return &otlpmetrics.ResourceMetrics{} - }, - } -) - -func NewOrigResourceMetrics() *otlpmetrics.ResourceMetrics { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.ResourceMetrics{} - } - return protoPoolResourceMetrics.Get().(*otlpmetrics.ResourceMetrics) -} - -func DeleteOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigResource(&orig.Resource, false) - for i := range orig.ScopeMetrics { - DeleteOrigScopeMetrics(orig.ScopeMetrics[i], true) - } - - orig.Reset() - if nullable { - protoPoolResourceMetrics.Put(orig) - } -} - -func CopyOrigResourceMetrics(dest, src *otlpmetrics.ResourceMetrics) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigResource(&dest.Resource, &src.Resource) - dest.ScopeMetrics = CopyOrigScopeMetricsSlice(dest.ScopeMetrics, src.ScopeMetrics) - dest.SchemaUrl = src.SchemaUrl -} - -func GenTestOrigResourceMetrics() *otlpmetrics.ResourceMetrics { - orig := NewOrigResourceMetrics() - orig.Resource = *GenTestOrigResource() - orig.ScopeMetrics = GenerateOrigTestScopeMetricsSlice() - orig.SchemaUrl = "test_schemaurl" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("resource") - MarshalJSONOrigResource(&orig.Resource, dest) - if len(orig.ScopeMetrics) > 0 { - dest.WriteObjectField("scopeMetrics") - dest.WriteArrayStart() - MarshalJSONOrigScopeMetrics(orig.ScopeMetrics[0], dest) - for i := 1; i < len(orig.ScopeMetrics); i++ { - dest.WriteMore() - MarshalJSONOrigScopeMetrics(orig.ScopeMetrics[i], dest) - } - dest.WriteArrayEnd() - } - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigResourceMetrics unmarshals all properties from the current struct from the source iterator. 
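// [Editor's note] The UnmarshalJSONOrig* helpers in this diff accept both the
// lowerCamelCase and snake_case spelling of every key ("schemaUrl" /
// "schema_url") and silently skip unknown fields. A stand-alone sketch of the
// same tolerance using encoding/json in place of the internal iterator:

package sketch

import "encoding/json"

type resourceLogs struct {
	SchemaURL string
}

func (r *resourceLogs) UnmarshalJSON(b []byte) error {
	var m map[string]json.RawMessage
	if err := json.Unmarshal(b, &m); err != nil {
		return err
	}
	// Unknown keys in m are simply ignored, mirroring iter.Skip().
	for _, key := range []string{"schemaUrl", "schema_url"} {
		if v, ok := m[key]; ok {
			return json.Unmarshal(v, &r.SchemaURL)
		}
	}
	return nil
}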
-func UnmarshalJSONOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "resource": - UnmarshalJSONOrigResource(&orig.Resource, iter) - case "scopeMetrics", "scope_metrics": - for iter.ReadArray() { - orig.ScopeMetrics = append(orig.ScopeMetrics, NewOrigScopeMetrics()) - UnmarshalJSONOrigScopeMetrics(orig.ScopeMetrics[len(orig.ScopeMetrics)-1], iter) - } - - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics) int { - var n int - var l int - _ = l - l = SizeProtoOrigResource(&orig.Resource) - n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.ScopeMetrics { - l = SizeProtoOrigScopeMetrics(orig.ScopeMetrics[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigResource(&orig.Resource, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - for i := len(orig.ScopeMetrics) - 1; i >= 0; i-- { - l = MarshalProtoOrigScopeMetrics(orig.ScopeMetrics[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigResource(&orig.Resource, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ScopeMetrics = append(orig.ScopeMetrics, NewOrigScopeMetrics()) - err = UnmarshalProtoOrigScopeMetrics(orig.ScopeMetrics[len(orig.ScopeMetrics)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetricsslice.go deleted file mode 100644 index 7bd0db8fc51..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetricsslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -func CopyOrigResourceMetricsSlice(dest, src []*otlpmetrics.ResourceMetrics) []*otlpmetrics.ResourceMetrics { - var newDest []*otlpmetrics.ResourceMetrics - if cap(dest) < len(src) { - newDest = make([]*otlpmetrics.ResourceMetrics, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigResourceMetrics() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigResourceMetrics(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigResourceMetrics() - } - } - for i := range src { - CopyOrigResourceMetrics(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestResourceMetricsSlice() []*otlpmetrics.ResourceMetrics { - orig := make([]*otlpmetrics.ResourceMetrics, 5) - orig[0] = NewOrigResourceMetrics() - orig[1] = GenTestOrigResourceMetrics() - orig[2] = NewOrigResourceMetrics() - orig[3] = GenTestOrigResourceMetrics() - orig[4] = NewOrigResourceMetrics() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofiles.go deleted file mode 100644 index 5481ab28651..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofiles.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolResourceProfiles = sync.Pool{ - New: func() any { - return &otlpprofiles.ResourceProfiles{} - }, - } -) - -func NewOrigResourceProfiles() *otlpprofiles.ResourceProfiles { - if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.ResourceProfiles{} - } - return protoPoolResourceProfiles.Get().(*otlpprofiles.ResourceProfiles) -} - -func DeleteOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigResource(&orig.Resource, false) - for i := range orig.ScopeProfiles { - DeleteOrigScopeProfiles(orig.ScopeProfiles[i], true) - } - - orig.Reset() - if nullable { - protoPoolResourceProfiles.Put(orig) - } -} - -func CopyOrigResourceProfiles(dest, src *otlpprofiles.ResourceProfiles) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigResource(&dest.Resource, &src.Resource) - dest.ScopeProfiles = CopyOrigScopeProfilesSlice(dest.ScopeProfiles, src.ScopeProfiles) - dest.SchemaUrl = src.SchemaUrl -} - -func GenTestOrigResourceProfiles() *otlpprofiles.ResourceProfiles { - orig := NewOrigResourceProfiles() - orig.Resource = *GenTestOrigResource() - orig.ScopeProfiles = GenerateOrigTestScopeProfilesSlice() - orig.SchemaUrl = "test_schemaurl" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
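// [Editor's note] The magic bytes written after each length prefix (0xa,
// 0x12, 0x1a in the Resource* marshalers; 0x62 in MarshalProtoOrigMetric)
// are protobuf tags: fieldNumber<<3 | wireType. The Resource* messages use
// fields 1-3, all wire type 2 (length-delimited). Sketch:

package sketch

// protoTag is a single byte only while fieldNum <= 15; larger field numbers
// need a multi-byte varint tag.
func protoTag(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

// protoTag(1, 2) == 0x0a (resource), protoTag(2, 2) == 0x12 (scope slice),
// protoTag(3, 2) == 0x1a (schemaUrl), protoTag(12, 2) == 0x62 (Metric.Metadata).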
-func MarshalJSONOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("resource") - MarshalJSONOrigResource(&orig.Resource, dest) - if len(orig.ScopeProfiles) > 0 { - dest.WriteObjectField("scopeProfiles") - dest.WriteArrayStart() - MarshalJSONOrigScopeProfiles(orig.ScopeProfiles[0], dest) - for i := 1; i < len(orig.ScopeProfiles); i++ { - dest.WriteMore() - MarshalJSONOrigScopeProfiles(orig.ScopeProfiles[i], dest) - } - dest.WriteArrayEnd() - } - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigResourceProfiles unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "resource": - UnmarshalJSONOrigResource(&orig.Resource, iter) - case "scopeProfiles", "scope_profiles": - for iter.ReadArray() { - orig.ScopeProfiles = append(orig.ScopeProfiles, NewOrigScopeProfiles()) - UnmarshalJSONOrigScopeProfiles(orig.ScopeProfiles[len(orig.ScopeProfiles)-1], iter) - } - - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles) int { - var n int - var l int - _ = l - l = SizeProtoOrigResource(&orig.Resource) - n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.ScopeProfiles { - l = SizeProtoOrigScopeProfiles(orig.ScopeProfiles[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigResource(&orig.Resource, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - for i := len(orig.ScopeProfiles) - 1; i >= 0; i-- { - l = MarshalProtoOrigScopeProfiles(orig.ScopeProfiles[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigResource(&orig.Resource, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ScopeProfiles = append(orig.ScopeProfiles, NewOrigScopeProfiles()) - err = UnmarshalProtoOrigScopeProfiles(orig.ScopeProfiles[len(orig.ScopeProfiles)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofilesslice.go deleted file mode 100644 index 978753cdc1d..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofilesslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigResourceProfilesSlice(dest, src []*otlpprofiles.ResourceProfiles) []*otlpprofiles.ResourceProfiles { - var newDest []*otlpprofiles.ResourceProfiles - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.ResourceProfiles, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigResourceProfiles() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigResourceProfiles(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigResourceProfiles() - } - } - for i := range src { - CopyOrigResourceProfiles(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestResourceProfilesSlice() []*otlpprofiles.ResourceProfiles { - orig := make([]*otlpprofiles.ResourceProfiles, 5) - orig[0] = NewOrigResourceProfiles() - orig[1] = GenTestOrigResourceProfiles() - orig[2] = NewOrigResourceProfiles() - orig[3] = GenTestOrigResourceProfiles() - orig[4] = NewOrigResourceProfiles() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespans.go deleted file mode 100644 index 3599e21cd60..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespans.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolResourceSpans = sync.Pool{ - New: func() any { - return &otlptrace.ResourceSpans{} - }, - } -) - -func NewOrigResourceSpans() *otlptrace.ResourceSpans { - if !UseProtoPooling.IsEnabled() { - return &otlptrace.ResourceSpans{} - } - return protoPoolResourceSpans.Get().(*otlptrace.ResourceSpans) -} - -func DeleteOrigResourceSpans(orig *otlptrace.ResourceSpans, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigResource(&orig.Resource, false) - for i := range orig.ScopeSpans { - DeleteOrigScopeSpans(orig.ScopeSpans[i], true) - } - - orig.Reset() - if nullable { - protoPoolResourceSpans.Put(orig) - } -} - -func CopyOrigResourceSpans(dest, src *otlptrace.ResourceSpans) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigResource(&dest.Resource, &src.Resource) - dest.ScopeSpans = CopyOrigScopeSpansSlice(dest.ScopeSpans, src.ScopeSpans) - dest.SchemaUrl = src.SchemaUrl -} - -func GenTestOrigResourceSpans() *otlptrace.ResourceSpans { - orig := NewOrigResourceSpans() - orig.Resource = *GenTestOrigResource() - orig.ScopeSpans = GenerateOrigTestScopeSpansSlice() - orig.SchemaUrl = "test_schemaurl" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigResourceSpans(orig *otlptrace.ResourceSpans, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("resource") - MarshalJSONOrigResource(&orig.Resource, dest) - if len(orig.ScopeSpans) > 0 { - dest.WriteObjectField("scopeSpans") - dest.WriteArrayStart() - MarshalJSONOrigScopeSpans(orig.ScopeSpans[0], dest) - for i := 1; i < len(orig.ScopeSpans); i++ { - dest.WriteMore() - MarshalJSONOrigScopeSpans(orig.ScopeSpans[i], dest) - } - dest.WriteArrayEnd() - } - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigResourceSpans unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigResourceSpans(orig *otlptrace.ResourceSpans, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "resource": - UnmarshalJSONOrigResource(&orig.Resource, iter) - case "scopeSpans", "scope_spans": - for iter.ReadArray() { - orig.ScopeSpans = append(orig.ScopeSpans, NewOrigScopeSpans()) - UnmarshalJSONOrigScopeSpans(orig.ScopeSpans[len(orig.ScopeSpans)-1], iter) - } - - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigResourceSpans(orig *otlptrace.ResourceSpans) int { - var n int - var l int - _ = l - l = SizeProtoOrigResource(&orig.Resource) - n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.ScopeSpans { - l = SizeProtoOrigScopeSpans(orig.ScopeSpans[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigResourceSpans(orig *otlptrace.ResourceSpans, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigResource(&orig.Resource, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - for i := len(orig.ScopeSpans) - 1; i >= 0; i-- { - l = MarshalProtoOrigScopeSpans(orig.ScopeSpans[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigResourceSpans(orig *otlptrace.ResourceSpans, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigResource(&orig.Resource, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.ScopeSpans = append(orig.ScopeSpans, NewOrigScopeSpans()) - err = UnmarshalProtoOrigScopeSpans(orig.ScopeSpans[len(orig.ScopeSpans)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespansslice.go deleted file mode 100644 index 3128ef7581f..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespansslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" -) - -func CopyOrigResourceSpansSlice(dest, src []*otlptrace.ResourceSpans) []*otlptrace.ResourceSpans { - var newDest []*otlptrace.ResourceSpans - if cap(dest) < len(src) { - newDest = make([]*otlptrace.ResourceSpans, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigResourceSpans() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigResourceSpans(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigResourceSpans() - } - } - for i := range src { - CopyOrigResourceSpans(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestResourceSpansSlice() []*otlptrace.ResourceSpans { - orig := make([]*otlptrace.ResourceSpans, 5) - orig[0] = NewOrigResourceSpans() - orig[1] = GenTestOrigResourceSpans() - orig[2] = NewOrigResourceSpans() - orig[3] = GenTestOrigResourceSpans() - orig[4] = NewOrigResourceSpans() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sampleslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sampleslice.go deleted file mode 100644 index a2dfca32163..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sampleslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigSampleSlice(dest, src []*otlpprofiles.Sample) []*otlpprofiles.Sample { - var newDest []*otlpprofiles.Sample - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.Sample, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSample() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigSample(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSample() - } - } - for i := range src { - CopyOrigSample(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestSampleSlice() []*otlpprofiles.Sample { - orig := make([]*otlpprofiles.Sample, 5) - orig[0] = NewOrigSample() - orig[1] = GenTestOrigSample() - orig[2] = NewOrigSample() - orig[3] = GenTestOrigSample() - orig[4] = NewOrigSample() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogs.go deleted file mode 100644 index c98a86112d2..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogs.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
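Aside: every CopyOrig*Slice helper deleted in this diff follows the same reuse discipline — grow the destination only when capacity is short, release surplus elements when shrinking, then deep-copy element by element. A minimal standalone sketch of that pattern, using illustrative names (item, newItem) rather than the vendored API:

package main

import "fmt"

type item struct{ v int }

func newItem() *item { return &item{} }

// copyItemSlice mirrors the generated CopyOrig*Slice shape: reuse dest's
// backing array and pointers where possible, allocate only for growth.
func copyItemSlice(dest, src []*item) []*item {
	var newDest []*item
	if cap(dest) < len(src) {
		newDest = make([]*item, len(src))
		copy(newDest, dest) // keep old pointers for reuse
		for i := len(dest); i < len(src); i++ {
			newDest[i] = newItem()
		}
	} else {
		newDest = dest[:len(src)]
		// Shrinking: nil out the tail so the GC can reclaim it
		// (len(src) < len(dest) <= cap(dest)).
		for i := len(src); i < len(dest); i++ {
			dest[i] = nil
		}
		// Growing within capacity: fill the new tail
		// (len(dest) < len(src) <= cap(dest)).
		for i := len(dest); i < len(src); i++ {
			newDest[i] = newItem()
		}
	}
	for i := range src {
		newDest[i].v = src[i].v // per-element deep copy
	}
	return newDest
}

func main() {
	dst := copyItemSlice(nil, []*item{{1}, {2}, {3}})
	fmt.Println(dst[0].v, dst[1].v, dst[2].v) // 1 2 3
}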
- -package internal - -import ( - "fmt" - "sync" - - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolScopeLogs = sync.Pool{ - New: func() any { - return &otlplogs.ScopeLogs{} - }, - } -) - -func NewOrigScopeLogs() *otlplogs.ScopeLogs { - if !UseProtoPooling.IsEnabled() { - return &otlplogs.ScopeLogs{} - } - return protoPoolScopeLogs.Get().(*otlplogs.ScopeLogs) -} - -func DeleteOrigScopeLogs(orig *otlplogs.ScopeLogs, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigInstrumentationScope(&orig.Scope, false) - for i := range orig.LogRecords { - DeleteOrigLogRecord(orig.LogRecords[i], true) - } - - orig.Reset() - if nullable { - protoPoolScopeLogs.Put(orig) - } -} - -func CopyOrigScopeLogs(dest, src *otlplogs.ScopeLogs) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigInstrumentationScope(&dest.Scope, &src.Scope) - dest.LogRecords = CopyOrigLogRecordSlice(dest.LogRecords, src.LogRecords) - dest.SchemaUrl = src.SchemaUrl -} - -func GenTestOrigScopeLogs() *otlplogs.ScopeLogs { - orig := NewOrigScopeLogs() - orig.Scope = *GenTestOrigInstrumentationScope() - orig.LogRecords = GenerateOrigTestLogRecordSlice() - orig.SchemaUrl = "test_schemaurl" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigScopeLogs(orig *otlplogs.ScopeLogs, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("scope") - MarshalJSONOrigInstrumentationScope(&orig.Scope, dest) - if len(orig.LogRecords) > 0 { - dest.WriteObjectField("logRecords") - dest.WriteArrayStart() - MarshalJSONOrigLogRecord(orig.LogRecords[0], dest) - for i := 1; i < len(orig.LogRecords); i++ { - dest.WriteMore() - MarshalJSONOrigLogRecord(orig.LogRecords[i], dest) - } - dest.WriteArrayEnd() - } - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigScopeLogs unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigScopeLogs(orig *otlplogs.ScopeLogs, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "scope": - UnmarshalJSONOrigInstrumentationScope(&orig.Scope, iter) - case "logRecords", "log_records": - for iter.ReadArray() { - orig.LogRecords = append(orig.LogRecords, NewOrigLogRecord()) - UnmarshalJSONOrigLogRecord(orig.LogRecords[len(orig.LogRecords)-1], iter) - } - - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigScopeLogs(orig *otlplogs.ScopeLogs) int { - var n int - var l int - _ = l - l = SizeProtoOrigInstrumentationScope(&orig.Scope) - n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.LogRecords { - l = SizeProtoOrigLogRecord(orig.LogRecords[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigScopeLogs(orig *otlplogs.ScopeLogs, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigInstrumentationScope(&orig.Scope, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - for i := len(orig.LogRecords) - 1; i >= 0; i-- { - l = MarshalProtoOrigLogRecord(orig.LogRecords[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigScopeLogs(orig *otlplogs.ScopeLogs, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigInstrumentationScope(&orig.Scope, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.LogRecords = append(orig.LogRecords, NewOrigLogRecord()) - err = UnmarshalProtoOrigLogRecord(orig.LogRecords[len(orig.LogRecords)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogsslice.go deleted file mode 100644 index ed0b6cc78fb..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogsslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" -) - -func CopyOrigScopeLogsSlice(dest, src []*otlplogs.ScopeLogs) []*otlplogs.ScopeLogs { - var newDest []*otlplogs.ScopeLogs - if cap(dest) < len(src) { - newDest = make([]*otlplogs.ScopeLogs, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigScopeLogs() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigScopeLogs(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigScopeLogs() - } - } - for i := range src { - CopyOrigScopeLogs(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestScopeLogsSlice() []*otlplogs.ScopeLogs { - orig := make([]*otlplogs.ScopeLogs, 5) - orig[0] = NewOrigScopeLogs() - orig[1] = GenTestOrigScopeLogs() - orig[2] = NewOrigScopeLogs() - orig[3] = GenTestOrigScopeLogs() - orig[4] = NewOrigScopeLogs() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetrics.go deleted file mode 100644 index 59e05b32feb..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetrics.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolScopeMetrics = sync.Pool{ - New: func() any { - return &otlpmetrics.ScopeMetrics{} - }, - } -) - -func NewOrigScopeMetrics() *otlpmetrics.ScopeMetrics { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.ScopeMetrics{} - } - return protoPoolScopeMetrics.Get().(*otlpmetrics.ScopeMetrics) -} - -func DeleteOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigInstrumentationScope(&orig.Scope, false) - for i := range orig.Metrics { - DeleteOrigMetric(orig.Metrics[i], true) - } - - orig.Reset() - if nullable { - protoPoolScopeMetrics.Put(orig) - } -} - -func CopyOrigScopeMetrics(dest, src *otlpmetrics.ScopeMetrics) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigInstrumentationScope(&dest.Scope, &src.Scope) - dest.Metrics = CopyOrigMetricSlice(dest.Metrics, src.Metrics) - dest.SchemaUrl = src.SchemaUrl -} - -func GenTestOrigScopeMetrics() *otlpmetrics.ScopeMetrics { - orig := NewOrigScopeMetrics() - orig.Scope = *GenTestOrigInstrumentationScope() - orig.Metrics = GenerateOrigTestMetricSlice() - orig.SchemaUrl = "test_schemaurl" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("scope") - MarshalJSONOrigInstrumentationScope(&orig.Scope, dest) - if len(orig.Metrics) > 0 { - dest.WriteObjectField("metrics") - dest.WriteArrayStart() - MarshalJSONOrigMetric(orig.Metrics[0], dest) - for i := 1; i < len(orig.Metrics); i++ { - dest.WriteMore() - MarshalJSONOrigMetric(orig.Metrics[i], dest) - } - dest.WriteArrayEnd() - } - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigScopeMetrics unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "scope": - UnmarshalJSONOrigInstrumentationScope(&orig.Scope, iter) - case "metrics": - for iter.ReadArray() { - orig.Metrics = append(orig.Metrics, NewOrigMetric()) - UnmarshalJSONOrigMetric(orig.Metrics[len(orig.Metrics)-1], iter) - } - - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics) int { - var n int - var l int - _ = l - l = SizeProtoOrigInstrumentationScope(&orig.Scope) - n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.Metrics { - l = SizeProtoOrigMetric(orig.Metrics[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigInstrumentationScope(&orig.Scope, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - for i := len(orig.Metrics) - 1; i >= 0; i-- { - l = MarshalProtoOrigMetric(orig.Metrics[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigInstrumentationScope(&orig.Scope, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Metrics = append(orig.Metrics, NewOrigMetric()) - err = UnmarshalProtoOrigMetric(orig.Metrics[len(orig.Metrics)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetricsslice.go deleted file mode 100644 index 31cfbf52ef0..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetricsslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -func CopyOrigScopeMetricsSlice(dest, src []*otlpmetrics.ScopeMetrics) []*otlpmetrics.ScopeMetrics { - var newDest []*otlpmetrics.ScopeMetrics - if cap(dest) < len(src) { - newDest = make([]*otlpmetrics.ScopeMetrics, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigScopeMetrics() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigScopeMetrics(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigScopeMetrics() - } - } - for i := range src { - CopyOrigScopeMetrics(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestScopeMetricsSlice() []*otlpmetrics.ScopeMetrics { - orig := make([]*otlpmetrics.ScopeMetrics, 5) - orig[0] = NewOrigScopeMetrics() - orig[1] = GenTestOrigScopeMetrics() - orig[2] = NewOrigScopeMetrics() - orig[3] = GenTestOrigScopeMetrics() - orig[4] = NewOrigScopeMetrics() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofiles.go deleted file mode 100644 index b64845a360d..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofiles.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolScopeProfiles = sync.Pool{ - New: func() any { - return &otlpprofiles.ScopeProfiles{} - }, - } -) - -func NewOrigScopeProfiles() *otlpprofiles.ScopeProfiles { - if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.ScopeProfiles{} - } - return protoPoolScopeProfiles.Get().(*otlpprofiles.ScopeProfiles) -} - -func DeleteOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigInstrumentationScope(&orig.Scope, false) - for i := range orig.Profiles { - DeleteOrigProfile(orig.Profiles[i], true) - } - - orig.Reset() - if nullable { - protoPoolScopeProfiles.Put(orig) - } -} - -func CopyOrigScopeProfiles(dest, src *otlpprofiles.ScopeProfiles) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigInstrumentationScope(&dest.Scope, &src.Scope) - dest.Profiles = CopyOrigProfileSlice(dest.Profiles, src.Profiles) - dest.SchemaUrl = src.SchemaUrl -} - -func GenTestOrigScopeProfiles() *otlpprofiles.ScopeProfiles { - orig := NewOrigScopeProfiles() - orig.Scope = *GenTestOrigInstrumentationScope() - orig.Profiles = GenerateOrigTestProfileSlice() - orig.SchemaUrl = "test_schemaurl" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("scope") - MarshalJSONOrigInstrumentationScope(&orig.Scope, dest) - if len(orig.Profiles) > 0 { - dest.WriteObjectField("profiles") - dest.WriteArrayStart() - MarshalJSONOrigProfile(orig.Profiles[0], dest) - for i := 1; i < len(orig.Profiles); i++ { - dest.WriteMore() - MarshalJSONOrigProfile(orig.Profiles[i], dest) - } - dest.WriteArrayEnd() - } - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigScopeProfiles unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "scope": - UnmarshalJSONOrigInstrumentationScope(&orig.Scope, iter) - case "profiles": - for iter.ReadArray() { - orig.Profiles = append(orig.Profiles, NewOrigProfile()) - UnmarshalJSONOrigProfile(orig.Profiles[len(orig.Profiles)-1], iter) - } - - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles) int { - var n int - var l int - _ = l - l = SizeProtoOrigInstrumentationScope(&orig.Scope) - n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.Profiles { - l = SizeProtoOrigProfile(orig.Profiles[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigInstrumentationScope(&orig.Scope, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - for i := len(orig.Profiles) - 1; i >= 0; i-- { - l = MarshalProtoOrigProfile(orig.Profiles[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigInstrumentationScope(&orig.Scope, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Profiles = append(orig.Profiles, NewOrigProfile()) - err = UnmarshalProtoOrigProfile(orig.Profiles[len(orig.Profiles)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofilesslice.go deleted file mode 100644 index 01e0894131f..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofilesslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigScopeProfilesSlice(dest, src []*otlpprofiles.ScopeProfiles) []*otlpprofiles.ScopeProfiles { - var newDest []*otlpprofiles.ScopeProfiles - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.ScopeProfiles, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigScopeProfiles() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigScopeProfiles(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigScopeProfiles() - } - } - for i := range src { - CopyOrigScopeProfiles(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestScopeProfilesSlice() []*otlpprofiles.ScopeProfiles { - orig := make([]*otlpprofiles.ScopeProfiles, 5) - orig[0] = NewOrigScopeProfiles() - orig[1] = GenTestOrigScopeProfiles() - orig[2] = NewOrigScopeProfiles() - orig[3] = GenTestOrigScopeProfiles() - orig[4] = NewOrigScopeProfiles() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespans.go deleted file mode 100644 index d8778228f05..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespans.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolScopeSpans = sync.Pool{ - New: func() any { - return &otlptrace.ScopeSpans{} - }, - } -) - -func NewOrigScopeSpans() *otlptrace.ScopeSpans { - if !UseProtoPooling.IsEnabled() { - return &otlptrace.ScopeSpans{} - } - return protoPoolScopeSpans.Get().(*otlptrace.ScopeSpans) -} - -func DeleteOrigScopeSpans(orig *otlptrace.ScopeSpans, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - DeleteOrigInstrumentationScope(&orig.Scope, false) - for i := range orig.Spans { - DeleteOrigSpan(orig.Spans[i], true) - } - - orig.Reset() - if nullable { - protoPoolScopeSpans.Put(orig) - } -} - -func CopyOrigScopeSpans(dest, src *otlptrace.ScopeSpans) { - // If copying to same object, just return. - if src == dest { - return - } - CopyOrigInstrumentationScope(&dest.Scope, &src.Scope) - dest.Spans = CopyOrigSpanSlice(dest.Spans, src.Spans) - dest.SchemaUrl = src.SchemaUrl -} - -func GenTestOrigScopeSpans() *otlptrace.ScopeSpans { - orig := NewOrigScopeSpans() - orig.Scope = *GenTestOrigInstrumentationScope() - orig.Spans = GenerateOrigTestSpanSlice() - orig.SchemaUrl = "test_schemaurl" - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigScopeSpans(orig *otlptrace.ScopeSpans, dest *json.Stream) { - dest.WriteObjectStart() - dest.WriteObjectField("scope") - MarshalJSONOrigInstrumentationScope(&orig.Scope, dest) - if len(orig.Spans) > 0 { - dest.WriteObjectField("spans") - dest.WriteArrayStart() - MarshalJSONOrigSpan(orig.Spans[0], dest) - for i := 1; i < len(orig.Spans); i++ { - dest.WriteMore() - MarshalJSONOrigSpan(orig.Spans[i], dest) - } - dest.WriteArrayEnd() - } - if orig.SchemaUrl != "" { - dest.WriteObjectField("schemaUrl") - dest.WriteString(orig.SchemaUrl) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigScopeSpans unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigScopeSpans(orig *otlptrace.ScopeSpans, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "scope": - UnmarshalJSONOrigInstrumentationScope(&orig.Scope, iter) - case "spans": - for iter.ReadArray() { - orig.Spans = append(orig.Spans, NewOrigSpan()) - UnmarshalJSONOrigSpan(orig.Spans[len(orig.Spans)-1], iter) - } - - case "schemaUrl", "schema_url": - orig.SchemaUrl = iter.ReadString() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigScopeSpans(orig *otlptrace.ScopeSpans) int { - var n int - var l int - _ = l - l = SizeProtoOrigInstrumentationScope(&orig.Scope) - n += 1 + proto.Sov(uint64(l)) + l - for i := range orig.Spans { - l = SizeProtoOrigSpan(orig.Spans[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - l = len(orig.SchemaUrl) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigScopeSpans(orig *otlptrace.ScopeSpans, buf []byte) int { - pos := len(buf) - var l int - _ = l - - l = MarshalProtoOrigInstrumentationScope(&orig.Scope, buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - - for i := len(orig.Spans) - 1; i >= 0; i-- { - l = MarshalProtoOrigSpan(orig.Spans[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - l = len(orig.SchemaUrl) - if l > 0 { - pos -= l - copy(buf[pos:], orig.SchemaUrl) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x1a - } - return len(buf) - pos -} - -func UnmarshalProtoOrigScopeSpans(orig *otlptrace.ScopeSpans, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - - err = UnmarshalProtoOrigInstrumentationScope(&orig.Scope, buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Spans = append(orig.Spans, NewOrigSpan()) - err = UnmarshalProtoOrigSpan(orig.Spans[len(orig.Spans)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 3: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.SchemaUrl = string(buf[startPos:pos]) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespansslice.go deleted file mode 100644 index 23583b1cd79..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespansslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" -) - -func CopyOrigScopeSpansSlice(dest, src []*otlptrace.ScopeSpans) []*otlptrace.ScopeSpans { - var newDest []*otlptrace.ScopeSpans - if cap(dest) < len(src) { - newDest = make([]*otlptrace.ScopeSpans, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigScopeSpans() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigScopeSpans(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigScopeSpans() - } - } - for i := range src { - CopyOrigScopeSpans(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestScopeSpansSlice() []*otlptrace.ScopeSpans { - orig := make([]*otlptrace.ScopeSpans, 5) - orig[0] = NewOrigScopeSpans() - orig[1] = GenTestOrigScopeSpans() - orig[2] = NewOrigScopeSpans() - orig[3] = GenTestOrigScopeSpans() - orig[4] = NewOrigScopeSpans() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_eventslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_eventslice.go deleted file mode 100644 index b9bd2e44509..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_eventslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" -) - -func CopyOrigSpan_EventSlice(dest, src []*otlptrace.Span_Event) []*otlptrace.Span_Event { - var newDest []*otlptrace.Span_Event - if cap(dest) < len(src) { - newDest = make([]*otlptrace.Span_Event, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSpan_Event() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigSpan_Event(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSpan_Event() - } - } - for i := range src { - CopyOrigSpan_Event(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestSpan_EventSlice() []*otlptrace.Span_Event { - orig := make([]*otlptrace.Span_Event, 5) - orig[0] = NewOrigSpan_Event() - orig[1] = GenTestOrigSpan_Event() - orig[2] = NewOrigSpan_Event() - orig[3] = GenTestOrigSpan_Event() - orig[4] = NewOrigSpan_Event() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_linkslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_linkslice.go deleted file mode 100644 index 55aed82146d..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_linkslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" -) - -func CopyOrigSpan_LinkSlice(dest, src []*otlptrace.Span_Link) []*otlptrace.Span_Link { - var newDest []*otlptrace.Span_Link - if cap(dest) < len(src) { - newDest = make([]*otlptrace.Span_Link, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSpan_Link() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigSpan_Link(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSpan_Link() - } - } - for i := range src { - CopyOrigSpan_Link(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestSpan_LinkSlice() []*otlptrace.Span_Link { - orig := make([]*otlptrace.Span_Link, 5) - orig[0] = NewOrigSpan_Link() - orig[1] = GenTestOrigSpan_Link() - orig[2] = NewOrigSpan_Link() - orig[3] = GenTestOrigSpan_Link() - orig[4] = NewOrigSpan_Link() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_spanslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_spanslice.go deleted file mode 100644 index 7dcbb05f463..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_spanslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" -) - -func CopyOrigSpanSlice(dest, src []*otlptrace.Span) []*otlptrace.Span { - var newDest []*otlptrace.Span - if cap(dest) < len(src) { - newDest = make([]*otlptrace.Span, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSpan() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigSpan(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSpan() - } - } - for i := range src { - CopyOrigSpan(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestSpanSlice() []*otlptrace.Span { - orig := make([]*otlptrace.Span, 5) - orig[0] = NewOrigSpan() - orig[1] = GenTestOrigSpan() - orig[2] = NewOrigSpan() - orig[3] = GenTestOrigSpan() - orig[4] = NewOrigSpan() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stackslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stackslice.go deleted file mode 100644 index c49f24d3340..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stackslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
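Aside: the NewOrig*/DeleteOrig* pairs in these generated wrappers all gate a sync.Pool behind a feature flag, and only return an object to the pool when the caller owned it by pointer (the nullable argument). A minimal sketch of that lifecycle, assuming a plain bool in place of the collector's UseProtoPooling feature gate and an illustrative status type:

package main

import (
	"fmt"
	"sync"
)

type status struct{ message string }

var (
	usePooling = true // stand-in for the UseProtoPooling feature gate
	statusPool = sync.Pool{New: func() any { return &status{} }}
)

func newStatus() *status {
	if !usePooling {
		return &status{}
	}
	return statusPool.Get().(*status)
}

// deleteStatus resets the object and, when pooling is on and the caller
// held it by pointer (nullable), hands it back to the pool for reuse.
func deleteStatus(s *status, nullable bool) {
	if s == nil {
		return
	}
	*s = status{} // equivalent of the proto Reset()
	if usePooling && nullable {
		statusPool.Put(s)
	}
}

func main() {
	s := newStatus()
	s.message = "ok"
	fmt.Println(s.message)
	deleteStatus(s, true) // s must not be used after this point
}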
- -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigStackSlice(dest, src []*otlpprofiles.Stack) []*otlpprofiles.Stack { - var newDest []*otlpprofiles.Stack - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.Stack, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigStack() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigStack(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigStack() - } - } - for i := range src { - CopyOrigStack(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestStackSlice() []*otlpprofiles.Stack { - orig := make([]*otlpprofiles.Stack, 5) - orig[0] = NewOrigStack() - orig[1] = GenTestOrigStack() - orig[2] = NewOrigStack() - orig[3] = GenTestOrigStack() - orig[4] = NewOrigStack() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_status.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_status.go deleted file mode 100644 index 26d64e86317..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_status.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolStatus = sync.Pool{ - New: func() any { - return &otlptrace.Status{} - }, - } -) - -func NewOrigStatus() *otlptrace.Status { - if !UseProtoPooling.IsEnabled() { - return &otlptrace.Status{} - } - return protoPoolStatus.Get().(*otlptrace.Status) -} - -func DeleteOrigStatus(orig *otlptrace.Status, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolStatus.Put(orig) - } -} - -func CopyOrigStatus(dest, src *otlptrace.Status) { - // If copying to same object, just return. - if src == dest { - return - } - dest.Message = src.Message - dest.Code = src.Code -} - -func GenTestOrigStatus() *otlptrace.Status { - orig := NewOrigStatus() - orig.Message = "test_message" - orig.Code = otlptrace.Status_StatusCode(1) - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigStatus(orig *otlptrace.Status, dest *json.Stream) { - dest.WriteObjectStart() - if orig.Message != "" { - dest.WriteObjectField("message") - dest.WriteString(orig.Message) - } - - if int32(orig.Code) != 0 { - dest.WriteObjectField("code") - dest.WriteInt32(int32(orig.Code)) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigStatus unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigStatus(orig *otlptrace.Status, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "message": - orig.Message = iter.ReadString() - case "code": - orig.Code = otlptrace.Status_StatusCode(iter.ReadEnumValue(otlptrace.Status_StatusCode_value)) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigStatus(orig *otlptrace.Status) int { - var n int - var l int - _ = l - l = len(orig.Message) - if l > 0 { - n += 1 + proto.Sov(uint64(l)) + l - } - if orig.Code != 0 { - n += 1 + proto.Sov(uint64(orig.Code)) - } - return n -} - -func MarshalProtoOrigStatus(orig *otlptrace.Status, buf []byte) int { - pos := len(buf) - var l int - _ = l - l = len(orig.Message) - if l > 0 { - pos -= l - copy(buf[pos:], orig.Message) - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0x12 - } - if orig.Code != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.Code)) - pos-- - buf[pos] = 0x18 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigStatus(orig *otlptrace.Status, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 2: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.Message = string(buf[startPos:pos]) - - case 3: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.Code = otlptrace.Status_StatusCode(num) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go index 3ed5c18ccba..d922f65d80c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go @@ -6,32 +6,28 @@ package internal -type StringSlice struct { +type StringSliceWrapper struct { orig *[]string state *State } -func GetOrigStringSlice(ms StringSlice) *[]string { +func GetStringSliceOrig(ms StringSliceWrapper) *[]string { return ms.orig } -func GetStringSliceState(ms StringSlice) *State { +func GetStringSliceState(ms StringSliceWrapper) *State { return ms.state } -func NewStringSlice(orig *[]string, state *State) StringSlice { - return StringSlice{orig: orig, state: state} +func NewStringSliceWrapper(orig *[]string, state *State) StringSliceWrapper { + return StringSliceWrapper{orig: orig, state: state} } -func GenerateTestStringSlice() StringSlice { - orig := GenerateOrigTestStringSlice() - return NewStringSlice(&orig, NewState()) +func GenTestStringSliceWrapper() StringSliceWrapper { + orig := []string{"a", "b", "c"} + return NewStringSliceWrapper(&orig, NewState()) } -func CopyOrigStringSlice(dst, src []string) []string { - return append(dst[:0], src...) 
-} - -func GenerateOrigTestStringSlice() []string { +func GenTestStringSlice() []string { return []string{"a", "b", "c"} } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sum.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sum.go deleted file mode 100644 index fcc015ad0fe..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sum.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolSum = sync.Pool{ - New: func() any { - return &otlpmetrics.Sum{} - }, - } -) - -func NewOrigSum() *otlpmetrics.Sum { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.Sum{} - } - return protoPoolSum.Get().(*otlpmetrics.Sum) -} - -func DeleteOrigSum(orig *otlpmetrics.Sum, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.DataPoints { - DeleteOrigNumberDataPoint(orig.DataPoints[i], true) - } - - orig.Reset() - if nullable { - protoPoolSum.Put(orig) - } -} - -func CopyOrigSum(dest, src *otlpmetrics.Sum) { - // If copying to same object, just return. - if src == dest { - return - } - dest.DataPoints = CopyOrigNumberDataPointSlice(dest.DataPoints, src.DataPoints) - dest.AggregationTemporality = src.AggregationTemporality - dest.IsMonotonic = src.IsMonotonic -} - -func GenTestOrigSum() *otlpmetrics.Sum { - orig := NewOrigSum() - orig.DataPoints = GenerateOrigTestNumberDataPointSlice() - orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1) - orig.IsMonotonic = true - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigSum(orig *otlpmetrics.Sum, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.DataPoints) > 0 { - dest.WriteObjectField("dataPoints") - dest.WriteArrayStart() - MarshalJSONOrigNumberDataPoint(orig.DataPoints[0], dest) - for i := 1; i < len(orig.DataPoints); i++ { - dest.WriteMore() - MarshalJSONOrigNumberDataPoint(orig.DataPoints[i], dest) - } - dest.WriteArrayEnd() - } - - if int32(orig.AggregationTemporality) != 0 { - dest.WriteObjectField("aggregationTemporality") - dest.WriteInt32(int32(orig.AggregationTemporality)) - } - if orig.IsMonotonic != false { - dest.WriteObjectField("isMonotonic") - dest.WriteBool(orig.IsMonotonic) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigSum unmarshals all properties from the current struct from the source iterator. 
-func UnmarshalJSONOrigSum(orig *otlpmetrics.Sum, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "dataPoints", "data_points": - for iter.ReadArray() { - orig.DataPoints = append(orig.DataPoints, NewOrigNumberDataPoint()) - UnmarshalJSONOrigNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter) - } - - case "aggregationTemporality", "aggregation_temporality": - orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value)) - case "isMonotonic", "is_monotonic": - orig.IsMonotonic = iter.ReadBool() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigSum(orig *otlpmetrics.Sum) int { - var n int - var l int - _ = l - for i := range orig.DataPoints { - l = SizeProtoOrigNumberDataPoint(orig.DataPoints[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - if orig.AggregationTemporality != 0 { - n += 1 + proto.Sov(uint64(orig.AggregationTemporality)) - } - if orig.IsMonotonic { - n += 2 - } - return n -} - -func MarshalProtoOrigSum(orig *otlpmetrics.Sum, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.DataPoints) - 1; i >= 0; i-- { - l = MarshalProtoOrigNumberDataPoint(orig.DataPoints[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - if orig.AggregationTemporality != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality)) - pos-- - buf[pos] = 0x10 - } - if orig.IsMonotonic { - pos-- - if orig.IsMonotonic { - buf[pos] = 1 - } else { - buf[pos] = 0 - } - pos-- - buf[pos] = 0x18 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigSum(orig *otlpmetrics.Sum, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.DataPoints = append(orig.DataPoints, NewOrigNumberDataPoint()) - err = UnmarshalProtoOrigNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos]) - if err != nil { - return err - } - - case 2: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num) - - case 3: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.IsMonotonic = num != 0 - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summary.go deleted file mode 100644 index 215d60ab4ae..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summary.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolSummary = sync.Pool{ - New: func() any { - return &otlpmetrics.Summary{} - }, - } -) - -func NewOrigSummary() *otlpmetrics.Summary { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.Summary{} - } - return protoPoolSummary.Get().(*otlpmetrics.Summary) -} - -func DeleteOrigSummary(orig *otlpmetrics.Summary, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - for i := range orig.DataPoints { - DeleteOrigSummaryDataPoint(orig.DataPoints[i], true) - } - - orig.Reset() - if nullable { - protoPoolSummary.Put(orig) - } -} - -func CopyOrigSummary(dest, src *otlpmetrics.Summary) { - // If copying to same object, just return. - if src == dest { - return - } - dest.DataPoints = CopyOrigSummaryDataPointSlice(dest.DataPoints, src.DataPoints) -} - -func GenTestOrigSummary() *otlpmetrics.Summary { - orig := NewOrigSummary() - orig.DataPoints = GenerateOrigTestSummaryDataPointSlice() - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. 
-func MarshalJSONOrigSummary(orig *otlpmetrics.Summary, dest *json.Stream) { - dest.WriteObjectStart() - if len(orig.DataPoints) > 0 { - dest.WriteObjectField("dataPoints") - dest.WriteArrayStart() - MarshalJSONOrigSummaryDataPoint(orig.DataPoints[0], dest) - for i := 1; i < len(orig.DataPoints); i++ { - dest.WriteMore() - MarshalJSONOrigSummaryDataPoint(orig.DataPoints[i], dest) - } - dest.WriteArrayEnd() - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigSummary unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigSummary(orig *otlpmetrics.Summary, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "dataPoints", "data_points": - for iter.ReadArray() { - orig.DataPoints = append(orig.DataPoints, NewOrigSummaryDataPoint()) - UnmarshalJSONOrigSummaryDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter) - } - - default: - iter.Skip() - } - } -} - -func SizeProtoOrigSummary(orig *otlpmetrics.Summary) int { - var n int - var l int - _ = l - for i := range orig.DataPoints { - l = SizeProtoOrigSummaryDataPoint(orig.DataPoints[i]) - n += 1 + proto.Sov(uint64(l)) + l - } - return n -} - -func MarshalProtoOrigSummary(orig *otlpmetrics.Summary, buf []byte) int { - pos := len(buf) - var l int - _ = l - for i := len(orig.DataPoints) - 1; i >= 0; i-- { - l = MarshalProtoOrigSummaryDataPoint(orig.DataPoints[i], buf[:pos]) - pos -= l - pos = proto.EncodeVarint(buf, pos, uint64(l)) - pos-- - buf[pos] = 0xa - } - return len(buf) - pos -} - -func UnmarshalProtoOrigSummary(orig *otlpmetrics.Summary, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. - fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeLen { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var length int - length, pos, err = proto.ConsumeLen(buf, pos) - if err != nil { - return err - } - startPos := pos - length - orig.DataPoints = append(orig.DataPoints, NewOrigSummaryDataPoint()) - err = UnmarshalProtoOrigSummaryDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos]) - if err != nil { - return err - } - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantile.go deleted file mode 100644 index 3195880d952..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantile.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". 
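Reviewer note: the wrapper files deleted above (Status, Sum, Summary, and their peers) all share one pooled lifecycle: NewOrigX allocates directly unless the UseProtoPooling gate is enabled, in which case it takes from a sync.Pool, and DeleteOrigX resets the message and returns it to the pool only when it is individually pooled (the "nullable" flag). A self-contained sketch of that lifecycle under assumed names (Msg for a concrete proto struct, usePooling for the feature gate):

package main

import (
	"fmt"
	"sync"
)

// Msg stands in for a generated proto struct such as otlptrace.Status.
type Msg struct{ Value string }

func (m *Msg) Reset() { *m = Msg{} }

var usePooling = true // stands in for the UseProtoPooling feature gate

var msgPool = sync.Pool{New: func() any { return &Msg{} }}

func NewOrigMsg() *Msg {
	if !usePooling {
		return &Msg{}
	}
	return msgPool.Get().(*Msg)
}

func DeleteOrigMsg(orig *Msg, nullable bool) {
	if orig == nil {
		return
	}
	orig.Reset() // zero the fields so pooled messages never leak stale data
	if usePooling && nullable {
		// Only pointer-held messages go back to the pool; values embedded
		// directly in a parent struct are just reset in place.
		msgPool.Put(orig)
	}
}

func main() {
	m := NewOrigMsg()
	m.Value = "test_message"
	DeleteOrigMsg(m, true)
	fmt.Println(NewOrigMsg().Value == "") // true: the reused message was zeroed
}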
- -package internal - -import ( - "encoding/binary" - "fmt" - "math" - "sync" - - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolSummaryDataPoint_ValueAtQuantile = sync.Pool{ - New: func() any { - return &otlpmetrics.SummaryDataPoint_ValueAtQuantile{} - }, - } -) - -func NewOrigSummaryDataPoint_ValueAtQuantile() *otlpmetrics.SummaryDataPoint_ValueAtQuantile { - if !UseProtoPooling.IsEnabled() { - return &otlpmetrics.SummaryDataPoint_ValueAtQuantile{} - } - return protoPoolSummaryDataPoint_ValueAtQuantile.Get().(*otlpmetrics.SummaryDataPoint_ValueAtQuantile) -} - -func DeleteOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolSummaryDataPoint_ValueAtQuantile.Put(orig) - } -} - -func CopyOrigSummaryDataPoint_ValueAtQuantile(dest, src *otlpmetrics.SummaryDataPoint_ValueAtQuantile) { - // If copying to same object, just return. - if src == dest { - return - } - dest.Quantile = src.Quantile - dest.Value = src.Value -} - -func GenTestOrigSummaryDataPoint_ValueAtQuantile() *otlpmetrics.SummaryDataPoint_ValueAtQuantile { - orig := NewOrigSummaryDataPoint_ValueAtQuantile() - orig.Quantile = float64(3.1415926) - orig.Value = float64(3.1415926) - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, dest *json.Stream) { - dest.WriteObjectStart() - if orig.Quantile != float64(0) { - dest.WriteObjectField("quantile") - dest.WriteFloat64(orig.Quantile) - } - if orig.Value != float64(0) { - dest.WriteObjectField("value") - dest.WriteFloat64(orig.Value) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigSummaryDataPointValueAtQuantile unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "quantile": - orig.Quantile = iter.ReadFloat64() - case "value": - orig.Value = iter.ReadFloat64() - default: - iter.Skip() - } - } -} - -func SizeProtoOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile) int { - var n int - var l int - _ = l - if orig.Quantile != 0 { - n += 9 - } - if orig.Value != 0 { - n += 9 - } - return n -} - -func MarshalProtoOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, buf []byte) int { - pos := len(buf) - var l int - _ = l - if orig.Quantile != 0 { - pos -= 8 - binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Quantile)) - pos-- - buf[pos] = 0x9 - } - if orig.Value != 0 { - pos -= 8 - binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Value)) - pos-- - buf[pos] = 0x11 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeI64 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeI64(buf, pos) - if err != nil { - return err - } - - orig.Quantile = math.Float64frombits(num) - - case 2: - if wireType != proto.WireTypeI64 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeI64(buf, pos) - if err != nil { - return err - } - - orig.Value = math.Float64frombits(num) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantileslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantileslice.go deleted file mode 100644 index e8341dee47c..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantileslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -func CopyOrigSummaryDataPoint_ValueAtQuantileSlice(dest, src []*otlpmetrics.SummaryDataPoint_ValueAtQuantile) []*otlpmetrics.SummaryDataPoint_ValueAtQuantile { - var newDest []*otlpmetrics.SummaryDataPoint_ValueAtQuantile - if cap(dest) < len(src) { - newDest = make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSummaryDataPoint_ValueAtQuantile() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigSummaryDataPoint_ValueAtQuantile(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSummaryDataPoint_ValueAtQuantile() - } - } - for i := range src { - CopyOrigSummaryDataPoint_ValueAtQuantile(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestSummaryDataPoint_ValueAtQuantileSlice() []*otlpmetrics.SummaryDataPoint_ValueAtQuantile { - orig := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, 5) - orig[0] = NewOrigSummaryDataPoint_ValueAtQuantile() - orig[1] = GenTestOrigSummaryDataPoint_ValueAtQuantile() - orig[2] = NewOrigSummaryDataPoint_ValueAtQuantile() - orig[3] = GenTestOrigSummaryDataPoint_ValueAtQuantile() - orig[4] = NewOrigSummaryDataPoint_ValueAtQuantile() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapointslice.go deleted file mode 100644 index 2da01f377aa..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapointslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" -) - -func CopyOrigSummaryDataPointSlice(dest, src []*otlpmetrics.SummaryDataPoint) []*otlpmetrics.SummaryDataPoint { - var newDest []*otlpmetrics.SummaryDataPoint - if cap(dest) < len(src) { - newDest = make([]*otlpmetrics.SummaryDataPoint, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSummaryDataPoint() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigSummaryDataPoint(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigSummaryDataPoint() - } - } - for i := range src { - CopyOrigSummaryDataPoint(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestSummaryDataPointSlice() []*otlpmetrics.SummaryDataPoint { - orig := make([]*otlpmetrics.SummaryDataPoint, 5) - orig[0] = NewOrigSummaryDataPoint() - orig[1] = GenTestOrigSummaryDataPoint() - orig[2] = NewOrigSummaryDataPoint() - orig[3] = GenTestOrigSummaryDataPoint() - orig[4] = NewOrigSummaryDataPoint() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go index 954987d85e3..c20fd9d0e15 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go @@ -6,32 +6,28 @@ package internal -type UInt64Slice struct { +type UInt64SliceWrapper struct { orig *[]uint64 state *State } -func GetOrigUInt64Slice(ms UInt64Slice) *[]uint64 { +func GetUInt64SliceOrig(ms UInt64SliceWrapper) *[]uint64 { return ms.orig } -func GetUInt64SliceState(ms UInt64Slice) *State { +func GetUInt64SliceState(ms UInt64SliceWrapper) *State { return ms.state } -func NewUInt64Slice(orig *[]uint64, state *State) UInt64Slice { - return UInt64Slice{orig: orig, state: state} +func NewUInt64SliceWrapper(orig *[]uint64, state *State) UInt64SliceWrapper { + return UInt64SliceWrapper{orig: orig, state: state} } -func GenerateTestUInt64Slice() UInt64Slice { - orig := GenerateOrigTestUint64Slice() - return NewUInt64Slice(&orig, NewState()) +func GenTestUInt64SliceWrapper() UInt64SliceWrapper { + orig := []uint64{1, 2, 3} + return NewUInt64SliceWrapper(&orig, NewState()) } -func CopyOrigUint64Slice(dst, src []uint64) []uint64 { - return append(dst[:0], src...) -} - -func GenerateOrigTestUint64Slice() []uint64 { +func GenTestUint64Slice() []uint64 { return []uint64{1, 2, 3} } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetype.go deleted file mode 100644 index b0b79601d26..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetype.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - "fmt" - "sync" - - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" - "go.opentelemetry.io/collector/pdata/internal/json" - "go.opentelemetry.io/collector/pdata/internal/proto" -) - -var ( - protoPoolValueType = sync.Pool{ - New: func() any { - return &otlpprofiles.ValueType{} - }, - } -) - -func NewOrigValueType() *otlpprofiles.ValueType { - if !UseProtoPooling.IsEnabled() { - return &otlpprofiles.ValueType{} - } - return protoPoolValueType.Get().(*otlpprofiles.ValueType) -} - -func DeleteOrigValueType(orig *otlpprofiles.ValueType, nullable bool) { - if orig == nil { - return - } - - if !UseProtoPooling.IsEnabled() { - orig.Reset() - return - } - - orig.Reset() - if nullable { - protoPoolValueType.Put(orig) - } -} - -func CopyOrigValueType(dest, src *otlpprofiles.ValueType) { - // If copying to same object, just return. 
- if src == dest { - return - } - dest.TypeStrindex = src.TypeStrindex - dest.UnitStrindex = src.UnitStrindex - dest.AggregationTemporality = src.AggregationTemporality -} - -func GenTestOrigValueType() *otlpprofiles.ValueType { - orig := NewOrigValueType() - orig.TypeStrindex = int32(13) - orig.UnitStrindex = int32(13) - orig.AggregationTemporality = otlpprofiles.AggregationTemporality(1) - return orig -} - -// MarshalJSONOrig marshals all properties from the current struct to the destination stream. -func MarshalJSONOrigValueType(orig *otlpprofiles.ValueType, dest *json.Stream) { - dest.WriteObjectStart() - if orig.TypeStrindex != int32(0) { - dest.WriteObjectField("typeStrindex") - dest.WriteInt32(orig.TypeStrindex) - } - if orig.UnitStrindex != int32(0) { - dest.WriteObjectField("unitStrindex") - dest.WriteInt32(orig.UnitStrindex) - } - - if int32(orig.AggregationTemporality) != 0 { - dest.WriteObjectField("aggregationTemporality") - dest.WriteInt32(int32(orig.AggregationTemporality)) - } - dest.WriteObjectEnd() -} - -// UnmarshalJSONOrigValueType unmarshals all properties from the current struct from the source iterator. -func UnmarshalJSONOrigValueType(orig *otlpprofiles.ValueType, iter *json.Iterator) { - for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { - switch f { - case "typeStrindex", "type_strindex": - orig.TypeStrindex = iter.ReadInt32() - case "unitStrindex", "unit_strindex": - orig.UnitStrindex = iter.ReadInt32() - case "aggregationTemporality", "aggregation_temporality": - orig.AggregationTemporality = otlpprofiles.AggregationTemporality(iter.ReadEnumValue(otlpprofiles.AggregationTemporality_value)) - default: - iter.Skip() - } - } -} - -func SizeProtoOrigValueType(orig *otlpprofiles.ValueType) int { - var n int - var l int - _ = l - if orig.TypeStrindex != 0 { - n += 1 + proto.Sov(uint64(orig.TypeStrindex)) - } - if orig.UnitStrindex != 0 { - n += 1 + proto.Sov(uint64(orig.UnitStrindex)) - } - if orig.AggregationTemporality != 0 { - n += 1 + proto.Sov(uint64(orig.AggregationTemporality)) - } - return n -} - -func MarshalProtoOrigValueType(orig *otlpprofiles.ValueType, buf []byte) int { - pos := len(buf) - var l int - _ = l - if orig.TypeStrindex != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.TypeStrindex)) - pos-- - buf[pos] = 0x8 - } - if orig.UnitStrindex != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex)) - pos-- - buf[pos] = 0x10 - } - if orig.AggregationTemporality != 0 { - pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality)) - pos-- - buf[pos] = 0x18 - } - return len(buf) - pos -} - -func UnmarshalProtoOrigValueType(orig *otlpprofiles.ValueType, buf []byte) error { - var err error - var fieldNum int32 - var wireType proto.WireType - - l := len(buf) - pos := 0 - for pos < l { - // If in a group parsing, move to the next tag. 
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos) - if err != nil { - return err - } - switch fieldNum { - - case 1: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.TypeStrindex = int32(num) - - case 2: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.UnitStrindex = int32(num) - - case 3: - if wireType != proto.WireTypeVarint { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - var num uint64 - num, pos, err = proto.ConsumeVarint(buf, pos) - if err != nil { - return err - } - - orig.AggregationTemporality = otlpprofiles.AggregationTemporality(num) - default: - pos, err = proto.ConsumeUnknown(buf, pos, wireType) - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetypeslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetypeslice.go deleted file mode 100644 index 6b1973a7183..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetypeslice.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package internal - -import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - -func CopyOrigValueTypeSlice(dest, src []*otlpprofiles.ValueType) []*otlpprofiles.ValueType { - var newDest []*otlpprofiles.ValueType - if cap(dest) < len(src) { - newDest = make([]*otlpprofiles.ValueType, len(src)) - // Copy old pointers to re-use. - copy(newDest, dest) - // Add new pointers for missing elements from len(dest) to len(srt). - for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigValueType() - } - } else { - newDest = dest[:len(src)] - // Cleanup the rest of the elements so GC can free the memory. - // This can happen when len(src) < len(dest) < cap(dest). - for i := len(src); i < len(dest); i++ { - DeleteOrigValueType(dest[i], true) - dest[i] = nil - } - // Add new pointers for missing elements. - // This can happen when len(dest) < len(src) < cap(dest). 
- for i := len(dest); i < len(src); i++ { - newDest[i] = NewOrigValueType() - } - } - for i := range src { - CopyOrigValueType(newDest[i], src[i]) - } - return newDest -} - -func GenerateOrigTestValueTypeSlice() []*otlpprofiles.ValueType { - orig := make([]*otlpprofiles.ValueType, 5) - orig[0] = NewOrigValueType() - orig[1] = GenTestOrigValueType() - orig[2] = NewOrigValueType() - orig[3] = GenTestOrigValueType() - orig[4] = NewOrigValueType() - return orig -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/grpcencoding/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/internal/grpcencoding/encoding.go deleted file mode 100644 index f2df60ab2c0..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/grpcencoding/encoding.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package grpcencoding // import "go.opentelemetry.io/collector/pdata/internal/grpcencoding" - -import ( - "google.golang.org/grpc/encoding" - "google.golang.org/grpc/mem" - - "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" - otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" - otlpcollectortraces "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" -) - -var ( - defaultBufferPoolSizes = []int{ - 256, - 4 << 10, // 4KB (go page size) - 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) - 32 << 10, // 32KB (default buffer size for io.Copy) - 512 << 10, // 512KB - 1 << 20, // 1MB - 4 << 20, // 4MB - 16 << 20, // 16MB - } - otelBufferPool = mem.NewTieredBufferPool(defaultBufferPoolSizes...) -) - -// DefaultBufferPool returns the current default buffer pool. It is a BufferPool -// created with mem.NewTieredBufferPool that uses a set of default sizes optimized for -// expected telemetry workflows. -func DefaultBufferPool() mem.BufferPool { - return otelBufferPool -} - -// Name is the name registered for the proto compressor. -const Name = "proto" - -func init() { - encoding.RegisterCodecV2(&codecV2{delegate: encoding.GetCodecV2(Name)}) -} - -// codecV2 is a custom proto encoding that uses a different tier schema for the TieredBufferPool as well -// as it call into the custom marshal/unmarshal logic that works with memory pooling. -// If not an otlp payload fallback on the default grpc/proto encoding. 
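Reviewer note: the codec deleted here hard-codes a type switch over the four concrete Export*ServiceRequest types; its replacement, otelgrpc/encoding.go later in this diff, collapses that into a small otelEncoder interface. A simplified sketch of the interface-based dispatch with delegate fallback, using a plain []byte in place of the tiered mem.Buffer pool plumbing (names here are illustrative, not the vendored API):

// sizerMarshaler mirrors the marshaling half of the otelEncoder interface.
type sizerMarshaler interface {
	SizeProto() int
	MarshalProto(buf []byte) int
}

// marshalWithPool encodes v itself when the value knows its own wire size,
// and defers to the fallback codec for every other payload type.
func marshalWithPool(v any, fallback func(any) ([]byte, error)) ([]byte, error) {
	if m, ok := v.(sizerMarshaler); ok {
		buf := make([]byte, m.SizeProto()) // the real codec takes this from a tiered buffer pool
		n := m.MarshalProto(buf)           // the generated marshalers fill the buffer back to front
		return buf[:n], nil
	}
	return fallback(v)
}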
-type codecV2 struct { - delegate encoding.CodecV2 -} - -func (c *codecV2) Marshal(v any) (mem.BufferSlice, error) { - switch req := v.(type) { - case *otlpcollectorlogs.ExportLogsServiceRequest: - size := internal.SizeProtoOrigExportLogsServiceRequest(req) - buf := otelBufferPool.Get(size) - n := internal.MarshalProtoOrigExportLogsServiceRequest(req, (*buf)[:size]) - *buf = (*buf)[:n] - return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil - case *otlpcollectormetrics.ExportMetricsServiceRequest: - size := internal.SizeProtoOrigExportMetricsServiceRequest(req) - buf := otelBufferPool.Get(size) - n := internal.MarshalProtoOrigExportMetricsServiceRequest(req, (*buf)[:size]) - *buf = (*buf)[:n] - return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil - case *otlpcollectortraces.ExportTraceServiceRequest: - size := internal.SizeProtoOrigExportTraceServiceRequest(req) - buf := otelBufferPool.Get(size) - n := internal.MarshalProtoOrigExportTraceServiceRequest(req, (*buf)[:size]) - *buf = (*buf)[:n] - return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil - case *otlpcollectorprofile.ExportProfilesServiceRequest: - size := internal.SizeProtoOrigExportProfilesServiceRequest(req) - buf := otelBufferPool.Get(size) - n := internal.MarshalProtoOrigExportProfilesServiceRequest(req, (*buf)[:size]) - *buf = (*buf)[:n] - return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil - } - - return c.delegate.Marshal(v) -} - -func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { - switch req := v.(type) { - case *otlpcollectorlogs.ExportLogsServiceRequest: - // TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice. - buf := data.MaterializeToBuffer(otelBufferPool) - defer buf.Free() - return internal.UnmarshalProtoOrigExportLogsServiceRequest(req, buf.ReadOnlyData()) - case *otlpcollectormetrics.ExportMetricsServiceRequest: - // TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice. - buf := data.MaterializeToBuffer(otelBufferPool) - defer buf.Free() - return internal.UnmarshalProtoOrigExportMetricsServiceRequest(req, buf.ReadOnlyData()) - case *otlpcollectortraces.ExportTraceServiceRequest: - // TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice. - buf := data.MaterializeToBuffer(otelBufferPool) - defer buf.Free() - return internal.UnmarshalProtoOrigExportTraceServiceRequest(req, buf.ReadOnlyData()) - case *otlpcollectorprofile.ExportProfilesServiceRequest: - // TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice. 
- buf := data.MaterializeToBuffer(otelBufferPool) - defer buf.Free() - return internal.UnmarshalProtoOrigExportProfilesServiceRequest(req, buf.ReadOnlyData()) - } - - return c.delegate.Unmarshal(data, v) -} - -func (c *codecV2) Name() string { - return Name -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/ids.go b/vendor/go.opentelemetry.io/collector/pdata/internal/ids.go deleted file mode 100644 index 59b9a9456ad..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/ids.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/pdata/internal" - -import ( - "go.opentelemetry.io/collector/pdata/internal/data" - "go.opentelemetry.io/collector/pdata/internal/json" -) - -func DeleteOrigTraceID(*data.TraceID, bool) {} - -func DeleteOrigSpanID(*data.SpanID, bool) {} - -func DeleteOrigProfileID(*data.ProfileID, bool) {} - -func GenTestOrigTraceID() *data.TraceID { - id := data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) - return &id -} - -func GenTestOrigSpanID() *data.SpanID { - id := data.SpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) - return &id -} - -func GenTestOrigProfileID() *data.ProfileID { - id := data.ProfileID([16]byte{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) - return &id -} - -func MarshalJSONOrigTraceID(id *data.TraceID, dest *json.Stream) { - id.MarshalJSONStream(dest) -} - -func MarshalJSONOrigSpanID(id *data.SpanID, dest *json.Stream) { - id.MarshalJSONStream(dest) -} - -func MarshalJSONOrigProfileID(id *data.ProfileID, dest *json.Stream) { - id.MarshalJSONStream(dest) -} - -func UnmarshalJSONOrigTraceID(id *data.TraceID, iter *json.Iterator) { - id.UnmarshalJSONIter(iter) -} - -func UnmarshalJSONOrigSpanID(id *data.SpanID, iter *json.Iterator) { - id.UnmarshalJSONIter(iter) -} - -func UnmarshalJSONOrigProfileID(id *data.ProfileID, iter *json.Iterator) { - id.UnmarshalJSONIter(iter) -} - -func SizeProtoOrigTraceID(id *data.TraceID) int { - return id.Size() -} - -func SizeProtoOrigSpanID(id *data.SpanID) int { - return id.Size() -} - -func SizeProtoOrigProfileID(id *data.ProfileID) int { - return id.Size() -} - -func MarshalProtoOrigTraceID(id *data.TraceID, buf []byte) int { - size := id.Size() - _, _ = id.MarshalTo(buf[len(buf)-size:]) - return size -} - -func MarshalProtoOrigSpanID(id *data.SpanID, buf []byte) int { - size := id.Size() - _, _ = id.MarshalTo(buf[len(buf)-size:]) - return size -} - -func MarshalProtoOrigProfileID(id *data.ProfileID, buf []byte) int { - size := id.Size() - _, _ = id.MarshalTo(buf[len(buf)-size:]) - return size -} - -func UnmarshalProtoOrigTraceID(id *data.TraceID, buf []byte) error { - return id.Unmarshal(buf) -} - -func UnmarshalProtoOrigSpanID(id *data.SpanID, buf []byte) error { - return id.Unmarshal(buf) -} - -func UnmarshalProtoOrigProfileID(id *data.ProfileID, buf []byte) error { - return id.Unmarshal(buf) -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/encoding.go new file mode 100644 index 00000000000..4f3bfdb6ccc --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/encoding.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc" + +import ( + "google.golang.org/grpc/encoding" + 
"google.golang.org/grpc/mem" +) + +var ( + defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 512 << 10, // 512KB + 1 << 20, // 1MB + 4 << 20, // 4MB + 16 << 20, // 16MB + } + otelBufferPool = mem.NewTieredBufferPool(defaultBufferPoolSizes...) +) + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with mem.NewTieredBufferPool that uses a set of default sizes optimized for +// expected telemetry workflows. +func DefaultBufferPool() mem.BufferPool { + return otelBufferPool +} + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodecV2(&codecV2{delegate: encoding.GetCodecV2(Name)}) +} + +// codecV2 is a custom proto encoding that uses a different tier schema for the TieredBufferPool as well +// as it call into the custom marshal/unmarshal logic that works with memory pooling. +// If not an otlp payload fallback on the default grpc/proto encoding. +type codecV2 struct { + delegate encoding.CodecV2 +} + +type otelEncoder interface { + SizeProto() int + MarshalProto([]byte) int + UnmarshalProto([]byte) error +} + +func (c *codecV2) Marshal(v any) (mem.BufferSlice, error) { + if m, ok := v.(otelEncoder); ok { + size := m.SizeProto() + buf := otelBufferPool.Get(size) + n := m.MarshalProto((*buf)[:size]) + *buf = (*buf)[:n] + return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil + } + + return c.delegate.Marshal(v) +} + +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { + if m, ok := v.(otelEncoder); ok { + // TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice. + buf := data.MaterializeToBuffer(otelBufferPool) + defer buf.Free() + return m.UnmarshalProto(buf.ReadOnlyData()) + } + + return c.delegate.Unmarshal(data, v) +} + +func (c *codecV2) Name() string { + return Name +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/logs_service.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/logs_service.go new file mode 100644 index 00000000000..ef26e365485 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/logs_service.go @@ -0,0 +1,88 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc" + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/collector/pdata/internal" +) + +// LogsServiceClient is the client API for LogsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LogsServiceClient interface { + Export(context.Context, *internal.ExportLogsServiceRequest, ...grpc.CallOption) (*internal.ExportLogsServiceResponse, error) +} + +type logsServiceClient struct { + cc *grpc.ClientConn +} + +func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient { + return &logsServiceClient{cc} +} + +func (c *logsServiceClient) Export(ctx context.Context, in *internal.ExportLogsServiceRequest, opts ...grpc.CallOption) (*internal.ExportLogsServiceResponse, error) { + out := new(internal.ExportLogsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...) 
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// LogsServiceServer is the server API for LogsService service.
+type LogsServiceServer interface {
+	Export(context.Context, *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error)
+}
+
+// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedLogsServiceServer struct{}
+
+func (*UnimplementedLogsServiceServer) Export(context.Context, *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) {
+	s.RegisterService(&logsServiceServiceDesc, srv)
+}
+
+// Context cannot be the first parameter of the function because of the gRPC handler definition.
+//
+//nolint:revive
+func logsServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
+	in := new(internal.ExportLogsServiceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LogsServiceServer).Export(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export",
+	}
+	handler := func(ctx context.Context, req any) (any, error) {
+		return srv.(LogsServiceServer).Export(ctx, req.(*internal.ExportLogsServiceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var logsServiceServiceDesc = grpc.ServiceDesc{
+	ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService",
+	HandlerType: (*LogsServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Export",
+			Handler:    logsServiceExportHandler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto",
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/metrics_service.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/metrics_service.go
new file mode 100644
index 00000000000..daf4f5cc8a9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/metrics_service.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"go.opentelemetry.io/collector/pdata/internal"
+)
+
+// MetricsServiceClient is the client API for MetricsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricsServiceClient interface {
+	Export(context.Context, *internal.ExportMetricsServiceRequest, ...grpc.CallOption) (*internal.ExportMetricsServiceResponse, error)
+}
+
+type metricsServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
+	return &metricsServiceClient{cc}
+}
+
+func (c *metricsServiceClient) Export(ctx context.Context, in *internal.ExportMetricsServiceRequest, opts ...grpc.CallOption) (*internal.ExportMetricsServiceResponse, error) {
+	out := new(internal.ExportMetricsServiceResponse)
+	err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// MetricsServiceServer is the server API for MetricsService service.
+type MetricsServiceServer interface {
+	Export(context.Context, *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error)
+}
+
+// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedMetricsServiceServer struct{}
+
+func (*UnimplementedMetricsServiceServer) Export(context.Context, *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
+	s.RegisterService(&metricsServiceServiceDesc, srv)
+}
+
+// Context cannot be the first parameter of the function because of the gRPC handler definition.
+//
+//nolint:revive
+func metricsServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
+	in := new(internal.ExportMetricsServiceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MetricsServiceServer).Export(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
+	}
+	handler := func(ctx context.Context, req any) (any, error) {
+		return srv.(MetricsServiceServer).Export(ctx, req.(*internal.ExportMetricsServiceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var metricsServiceServiceDesc = grpc.ServiceDesc{
+	ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
+	HandlerType: (*MetricsServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Export",
+			Handler:    metricsServiceExportHandler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/profiles_service.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/profiles_service.go
new file mode 100644
index 00000000000..db846146ff8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/profiles_service.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ProfilesServiceClient is the client API for ProfilesService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ProfilesServiceClient interface {
+	Export(context.Context, *internal.ExportProfilesServiceRequest, ...grpc.CallOption) (*internal.ExportProfilesServiceResponse, error)
+}
+
+type profilesServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient {
+	return &profilesServiceClient{cc}
+}
+
+func (c *profilesServiceClient) Export(ctx context.Context, in *internal.ExportProfilesServiceRequest, opts ...grpc.CallOption) (*internal.ExportProfilesServiceResponse, error) {
+	out := new(internal.ExportProfilesServiceResponse)
+	err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ProfilesServiceServer is the server API for ProfilesService service.
+type ProfilesServiceServer interface {
+	Export(context.Context, *internal.ExportProfilesServiceRequest) (*internal.ExportProfilesServiceResponse, error)
+}
+
+// UnimplementedProfilesServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedProfilesServiceServer struct{}
+
+func (*UnimplementedProfilesServiceServer) Export(context.Context, *internal.ExportProfilesServiceRequest) (*internal.ExportProfilesServiceResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterProfilesServiceServer(s *grpc.Server, srv ProfilesServiceServer) {
+	s.RegisterService(&profilesServiceServiceDesc, srv)
+}
+
+// Context cannot be the first parameter of the function because of the gRPC handler definition.
+//
+//nolint:revive
+func profilesServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
+	in := new(internal.ExportProfilesServiceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ProfilesServiceServer).Export(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export",
+	}
+	handler := func(ctx context.Context, req any) (any, error) {
+		return srv.(ProfilesServiceServer).Export(ctx, req.(*internal.ExportProfilesServiceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var profilesServiceServiceDesc = grpc.ServiceDesc{
+	ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService",
+	HandlerType: (*ProfilesServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Export",
+			Handler:    profilesServiceExportHandler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto",
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/trace_service.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/trace_service.go
new file mode 100644
index 00000000000..943f61792bd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/trace_service.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"go.opentelemetry.io/collector/pdata/internal"
+)
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type TraceServiceClient interface {
+	Export(context.Context, *internal.ExportTraceServiceRequest, ...grpc.CallOption) (*internal.ExportTraceServiceResponse, error)
+}
+
+type traceServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
+	return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) Export(ctx context.Context, in *internal.ExportTraceServiceRequest, opts ...grpc.CallOption) (*internal.ExportTraceServiceResponse, error) {
+	out := new(internal.ExportTraceServiceResponse)
+	err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// TraceServiceServer is the server API for TraceService service.
+type TraceServiceServer interface {
+	Export(context.Context, *internal.ExportTraceServiceRequest) (*internal.ExportTraceServiceResponse, error)
+}
+
+// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedTraceServiceServer struct{}
+
+func (*UnimplementedTraceServiceServer) Export(context.Context, *internal.ExportTraceServiceRequest) (*internal.ExportTraceServiceResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
+	s.RegisterService(&traceServiceServiceDesc, srv)
+}
+
+// Context cannot be the first parameter of the function because of the gRPC handler definition.
+//
+//nolint:revive
+func traceServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
+	in := new(internal.ExportTraceServiceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TraceServiceServer).Export(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
+	}
+	handler := func(ctx context.Context, req any) (any, error) {
+		return srv.(TraceServiceServer).Export(ctx, req.(*internal.ExportTraceServiceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var traceServiceServiceDesc = grpc.ServiceDesc{
+	ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
+	HandlerType: (*TraceServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Export",
+			Handler:    traceServiceExportHandler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
index c0328a5b41d..aad78de4e22 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
@@ -4,12 +4,12 @@
 package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
 
 import (
-	otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
+	"go.opentelemetry.io/collector/pdata/internal"
 )
 
 // MigrateLogs implements any translation needed due to deprecation in OTLP logs protocol.
 // Any plog.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
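Reviewer note: the otlp.Migrate* hunks below only change the parameter types from the protogen packages to the new internal aliases; the migration behavior stays as sketched here, where data sent by an older client under a deprecated field is adopted into its replacement so downstream code only ever reads the current field. The struct is illustrative, not the real internal.ResourceLogs, and the clearing of the deprecated field is an assumption, since the hunk is cut off before the end of the function body:

// resourceLogs is an illustrative stand-in for internal.ResourceLogs.
type resourceLogs struct {
	ScopeLogs           []string
	DeprecatedScopeLogs []string
}

func migrateLogs(rls []*resourceLogs) {
	for _, rl := range rls {
		if len(rl.ScopeLogs) == 0 {
			// Older senders populated only the deprecated field; adopt it.
			rl.ScopeLogs = rl.DeprecatedScopeLogs
		}
		// Assumed: the deprecated field is then cleared (the vendored hunk
		// above is truncated before this point in the function).
		rl.DeprecatedScopeLogs = nil
	}
}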
-func MigrateLogs(rls []*otlplogs.ResourceLogs) { +func MigrateLogs(rls []*internal.ResourceLogs) { for _, rl := range rls { if len(rl.ScopeLogs) == 0 { rl.ScopeLogs = rl.DeprecatedScopeLogs diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go index 9a7da14868c..fb1776c4ffd 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go @@ -4,12 +4,12 @@ package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp" import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/internal" ) // MigrateMetrics implements any translation needed due to deprecation in OTLP metrics protocol. // Any pmetric.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation. -func MigrateMetrics(rms []*otlpmetrics.ResourceMetrics) { +func MigrateMetrics(rms []*internal.ResourceMetrics) { for _, rm := range rms { if len(rm.ScopeMetrics) == 0 { rm.ScopeMetrics = rm.DeprecatedScopeMetrics diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go index 59c23cc672b..5144e7c4d2f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go @@ -4,9 +4,9 @@ package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp" import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal" ) // MigrateProfiles implements any translation needed due to deprecation in OTLP profiles protocol. // Any pprofile.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation. -func MigrateProfiles(_ []*otlpprofiles.ResourceProfiles) {} +func MigrateProfiles(_ []*internal.ResourceProfiles) {} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go index 627881fc3dc..84f5deadef0 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go @@ -4,12 +4,12 @@ package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp" import ( - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/internal" ) // MigrateTraces implements any translation needed due to deprecation in OTLP traces protocol. // Any ptrace.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation. 
-func MigrateTraces(rss []*otlptrace.ResourceSpans) {
+func MigrateTraces(rss []*internal.ResourceSpans) {
 	for _, rs := range rss {
 		if len(rs.ScopeSpans) == 0 {
 			rs.ScopeSpans = rs.DeprecatedScopeSpans
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go
new file mode 100644
index 00000000000..5a038fe0854
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+	"encoding/hex"
+	"errors"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const profileIDSize = 16
+
+var errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length")
+
+// ProfileID is a custom data type that is used for all profile_id fields in OTLP
+// Protobuf messages.
+type ProfileID [profileIDSize]byte
+
+func DeleteProfileID(*ProfileID, bool) {}
+
+func CopyProfileID(dest, src *ProfileID) {
+	*dest = *src
+}
+
+// IsEmpty returns true if id doesn't contain at least one non-zero byte.
+func (pid ProfileID) IsEmpty() bool {
+	return pid == [profileIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (pid ProfileID) SizeProto() int {
+	if pid.IsEmpty() {
+		return 0
+	}
+
+	return profileIDSize
+}
+
+// MarshalProto converts profile ID into a binary representation. Called by Protobuf serialization.
+func (pid ProfileID) MarshalProto(buf []byte) int {
+	if pid.IsEmpty() {
+		return 0
+	}
+
+	return copy(buf[len(buf)-profileIDSize:], pid[:])
+}
+
+// UnmarshalProto inflates this profile ID from binary representation. Called by Protobuf serialization.
+func (pid *ProfileID) UnmarshalProto(buf []byte) error {
+	if len(buf) == 0 {
+		*pid = [profileIDSize]byte{}
+		return nil
+	}
+
+	if len(buf) != profileIDSize {
+		return errUnmarshalProfileID
+	}
+
+	copy(pid[:], buf)
+	return nil
+}
+
+// MarshalJSON converts ProfileID into a hex string.
+//
+//nolint:govet
+func (pid ProfileID) MarshalJSON(dest *json.Stream) {
+	dest.WriteString(hex.EncodeToString(pid[:]))
+}
+
+// UnmarshalJSON decodes ProfileID from hex string.
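+// The ID is reset to all zeros before the hex payload is decoded in place.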
+//
+//nolint:govet
+func (pid *ProfileID) UnmarshalJSON(iter *json.Iterator) {
+	*pid = [profileIDSize]byte{}
+	unmarshalJSON(pid[:], iter)
+}
+
+func GenTestProfileID() *ProfileID {
+	pid := ProfileID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
+	return &pid
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/marshal.go b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/marshal.go
index 1655533a2ce..b4d494e6e54 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/marshal.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/marshal.go
@@ -8,7 +8,6 @@ func EncodeVarint(buf []byte, offset int, v uint64) int {
 	offset -= Sov(v)
 	base := offset
 	for v >= 1<<7 {
-		//nolint:gosec
 		buf[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/size.go b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/size.go
index 6f48443bf9d..bfc52147599 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/size.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/size.go
@@ -12,6 +12,5 @@ func Sov(x uint64) (n int) {
 }
 
 func Soz(x uint64) (n int) {
-	//nolint:gosec
 	return Sov((x << 1) ^ uint64((int64(x) >> 63)))
 }
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/unmarshal.go b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/unmarshal.go
index 6ecf5ca3a3a..b6dc0c79495 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/unmarshal.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/unmarshal.go
@@ -89,7 +89,6 @@ func ConsumeLen(buf []byte, pos int) (int, int, error) {
 	if err != nil {
 		return 0, 0, err
 	}
-	//nolint:gosec
 	length := int(num)
 	if length < 0 {
 		return 0, 0, ErrInvalidLength
@@ -116,9 +115,7 @@ func ConsumeTag(buf []byte, pos int) (int32, WireType, int, error) {
 	if err != nil {
 		return 0, 0, 0, err
 	}
-	//nolint:gosec
 	fieldNum := int32(tag >> 3)
-	//nolint:gosec
 	wireType := int8(tag & 0x7)
 	if fieldNum <= 0 {
 		return 0, 0, 0, fmt.Errorf("proto: Link: illegal field=%d (tag=%d, pos=%d)", fieldNum, tag, pos)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go
new file mode 100644
index 00000000000..9ec0f465fb0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+	"encoding/hex"
+	"errors"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const spanIDSize = 8
+
+var errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length")
+
+// SpanID is a custom data type that is used for all span_id fields in OTLP
+// Protobuf messages.
+type SpanID [spanIDSize]byte
+
+func DeleteSpanID(*SpanID, bool) {}
+
+func CopySpanID(dest, src *SpanID) {
+	*dest = *src
+}
+
+// IsEmpty returns true if id doesn't contain at least one non-zero byte.
+func (sid SpanID) IsEmpty() bool {
+	return sid == [spanIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (sid SpanID) SizeProto() int {
+	if sid.IsEmpty() {
+		return 0
+	}
+	return spanIDSize
+}
+
+// MarshalProto converts span ID into a binary representation. Called by Protobuf serialization.
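+// An all-zero ID marshals to zero bytes; otherwise the bytes are copied into the tail of buf,
+// since the proto encoder in this package fills buffers back to front.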
+func (sid SpanID) MarshalProto(buf []byte) int {
+	if sid.IsEmpty() {
+		return 0
+	}
+
+	return copy(buf[len(buf)-spanIDSize:], sid[:])
+}
+
+// UnmarshalProto inflates this span ID from binary representation. Called by Protobuf serialization.
+func (sid *SpanID) UnmarshalProto(data []byte) error {
+	if len(data) == 0 {
+		*sid = [spanIDSize]byte{}
+		return nil
+	}
+
+	if len(data) != spanIDSize {
+		return errUnmarshalSpanID
+	}
+
+	copy(sid[:], data)
+	return nil
+}
+
+// MarshalJSON converts SpanID into a hex string.
+//
+//nolint:govet
+func (sid SpanID) MarshalJSON(dest *json.Stream) {
+	dest.WriteString(hex.EncodeToString(sid[:]))
+}
+
+// UnmarshalJSON decodes SpanID from hex string.
+//
+//nolint:govet
+func (sid *SpanID) UnmarshalJSON(iter *json.Iterator) {
+	*sid = [spanIDSize]byte{}
+	unmarshalJSON(sid[:], iter)
+}
+
+func GenTestSpanID() *SpanID {
+	sid := SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
+	return &sid
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go
new file mode 100644
index 00000000000..fe54123de6d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+	"encoding/hex"
+	"errors"
+
+	"go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const traceIDSize = 16
+
+var errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length")
+
+// TraceID is a custom data type that is used for all trace_id fields in OTLP
+// Protobuf messages.
+type TraceID [traceIDSize]byte
+
+func DeleteTraceID(*TraceID, bool) {}
+
+func CopyTraceID(dest, src *TraceID) {
+	*dest = *src
+}
+
+// IsEmpty returns true if id doesn't contain at least one non-zero byte.
+func (tid TraceID) IsEmpty() bool {
+	return tid == [traceIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (tid TraceID) SizeProto() int {
+	if tid.IsEmpty() {
+		return 0
+	}
+
+	return traceIDSize
+}
+
+// MarshalProto converts trace ID into a binary representation. Called by Protobuf serialization.
+func (tid TraceID) MarshalProto(buf []byte) int {
+	if tid.IsEmpty() {
+		return 0
+	}
+
+	return copy(buf[len(buf)-traceIDSize:], tid[:])
+}
+
+// UnmarshalProto inflates this trace ID from binary representation. Called by Protobuf serialization.
+func (tid *TraceID) UnmarshalProto(buf []byte) error {
+	if len(buf) == 0 {
+		*tid = [traceIDSize]byte{}
+		return nil
+	}
+
+	if len(buf) != traceIDSize {
+		return errUnmarshalTraceID
+	}
+
+	copy(tid[:], buf)
+	return nil
+}
+
+// MarshalJSON converts TraceID into a hex string.
+//
+//nolint:govet
+func (tid TraceID) MarshalJSON(dest *json.Stream) {
+	dest.WriteString(hex.EncodeToString(tid[:]))
+}
+
+// UnmarshalJSON decodes TraceID from hex string.
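+// As with SpanID and ProfileID above, the ID is reset to all zeros before the hex payload is decoded in place.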
+//
+//nolint:govet
+func (tid *TraceID) UnmarshalJSON(iter *json.Iterator) {
+	*tid = [traceIDSize]byte{}
+	unmarshalJSON(tid[:], iter)
+}
+
+func GenTestTraceID() *TraceID {
+	tid := TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
+	return &tid
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
index 31b20766cf9..b47b508336c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
@@ -3,22 +3,17 @@
 
 package internal // import "go.opentelemetry.io/collector/pdata/internal"
 
-import (
-	otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
-	otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
 // LogsToProto internal helper to convert Logs to protobuf representation.
-func LogsToProto(l Logs) otlplogs.LogsData {
-	return otlplogs.LogsData{
+func LogsToProto(l LogsWrapper) LogsData {
+	return LogsData{
 		ResourceLogs: l.orig.ResourceLogs,
 	}
 }
 
 // LogsFromProto internal helper to convert protobuf representation to Logs.
 // This function set exclusive state assuming that it's called only once per Logs.
-func LogsFromProto(orig otlplogs.LogsData) Logs {
-	return NewLogs(&otlpcollectorlog.ExportLogsServiceRequest{
+func LogsFromProto(orig LogsData) LogsWrapper {
+	return NewLogsWrapper(&ExportLogsServiceRequest{
 		ResourceLogs: orig.ResourceLogs,
 	}, NewState())
 }
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
index 33453c0e6f3..c5c6ca7f6da 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
@@ -3,28 +3,24 @@
 
 package internal // import "go.opentelemetry.io/collector/pdata/internal"
 
-import (
-	otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type Map struct {
-	orig  *[]otlpcommon.KeyValue
+type MapWrapper struct {
+	orig  *[]KeyValue
 	state *State
 }
 
-func GetOrigMap(ms Map) *[]otlpcommon.KeyValue {
+func GetMapOrig(ms MapWrapper) *[]KeyValue {
 	return ms.orig
 }
 
-func GetMapState(ms Map) *State {
+func GetMapState(ms MapWrapper) *State {
 	return ms.state
 }
 
-func NewMap(orig *[]otlpcommon.KeyValue, state *State) Map {
-	return Map{orig: orig, state: state}
+func NewMapWrapper(orig *[]KeyValue, state *State) MapWrapper {
+	return MapWrapper{orig: orig, state: state}
 }
 
-func GenerateTestMap() Map {
-	orig := GenerateOrigTestKeyValueSlice()
-	return NewMap(&orig, NewState())
+func GenTestMapWrapper() MapWrapper {
+	orig := GenTestKeyValueSlice()
+	return NewMapWrapper(&orig, NewState())
 }
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
index ef684b2842d..3cb790395fc 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
@@ -3,22 +3,17 @@
 
 package internal // import "go.opentelemetry.io/collector/pdata/internal"
 
-import (
-	otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
-	otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
 // MetricsToProto internal helper to convert Metrics to protobuf
representation. -func MetricsToProto(l Metrics) otlpmetrics.MetricsData { - return otlpmetrics.MetricsData{ +func MetricsToProto(l MetricsWrapper) MetricsData { + return MetricsData{ ResourceMetrics: l.orig.ResourceMetrics, } } // MetricsFromProto internal helper to convert protobuf representation to Metrics. // This function set exclusive state assuming that it's called only once per Metrics. -func MetricsFromProto(orig otlpmetrics.MetricsData) Metrics { - return NewMetrics(&otlpcollectormetrics.ExportMetricsServiceRequest{ +func MetricsFromProto(orig MetricsData) MetricsWrapper { + return NewMetricsWrapper(&ExportMetricsServiceRequest{ ResourceMetrics: orig.ResourceMetrics, }, NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go index add52635614..1a56d922975 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go @@ -3,14 +3,9 @@ package internal // import "go.opentelemetry.io/collector/pdata/internal" -import ( - otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" - otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" -) - // ProfilesToProto internal helper to convert Profiles to protobuf representation. -func ProfilesToProto(l Profiles) otlpprofile.ProfilesData { - return otlpprofile.ProfilesData{ +func ProfilesToProto(l ProfilesWrapper) ProfilesData { + return ProfilesData{ ResourceProfiles: l.orig.ResourceProfiles, Dictionary: l.orig.Dictionary, } @@ -18,8 +13,8 @@ func ProfilesToProto(l Profiles) otlpprofile.ProfilesData { // ProfilesFromProto internal helper to convert protobuf representation to Profiles. // This function set exclusive state assuming that it's called only once per Profiles. -func ProfilesFromProto(orig otlpprofile.ProfilesData) Profiles { - return NewProfiles(&otlpcollectorprofile.ExportProfilesServiceRequest{ +func ProfilesFromProto(orig ProfilesData) ProfilesWrapper { + return NewProfilesWrapper(&ExportProfilesServiceRequest{ ResourceProfiles: orig.ResourceProfiles, Dictionary: orig.Dictionary, }, NewState()) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go index bdc7d93bbab..758a1cb27e0 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go @@ -3,22 +3,17 @@ package internal // import "go.opentelemetry.io/collector/pdata/internal" -import ( - otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" -) - // TracesToProto internal helper to convert Traces to protobuf representation. -func TracesToProto(l Traces) otlptrace.TracesData { - return otlptrace.TracesData{ +func TracesToProto(l TracesWrapper) TracesData { + return TracesData{ ResourceSpans: l.orig.ResourceSpans, } } // TracesFromProto internal helper to convert protobuf representation to Traces. // This function set exclusive state assuming that it's called only once per Traces. 
-func TracesFromProto(orig otlptrace.TracesData) Traces { - return NewTraces(&otlpcollectortrace.ExportTraceServiceRequest{ +func TracesFromProto(orig TracesData) TracesWrapper { + return NewTracesWrapper(&ExportTraceServiceRequest{ ResourceSpans: orig.ResourceSpans, }, NewState()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go index ce3bfe232f0..56278b728dd 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go @@ -3,41 +3,28 @@ package internal // import "go.opentelemetry.io/collector/pdata/internal" -import ( - "go.opentelemetry.io/collector/pdata/internal/json" -) - -type TraceState struct { +type TraceStateWrapper struct { orig *string state *State } -func GetOrigTraceState(ms TraceState) *string { +func GetTraceStateOrig(ms TraceStateWrapper) *string { return ms.orig } -func GetTraceStateState(ms TraceState) *State { +func GetTraceStateState(ms TraceStateWrapper) *State { return ms.state } -func NewTraceState(orig *string, state *State) TraceState { - return TraceState{orig: orig, state: state} -} - -func GenerateTestTraceState() TraceState { - return NewTraceState(GenTestOrigTraceState(), NewState()) -} - -// UnmarshalJSONOrigTraceState marshals all properties from the current struct to the destination stream. -func UnmarshalJSONOrigTraceState(orig *string, iter *json.Iterator) { - *orig = iter.ReadString() +func NewTraceStateWrapper(orig *string, state *State) TraceStateWrapper { + return TraceStateWrapper{orig: orig, state: state} } -func CopyOrigTraceState(dest, src *string) { - *dest = *src +func GenTestTraceStateWrapper() TraceStateWrapper { + return NewTraceStateWrapper(GenTestTraceState(), NewState()) } -func GenTestOrigTraceState() *string { +func GenTestTraceState() *string { orig := new(string) *orig = "rojo=00f067aa0ba902b7" return orig diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go index 43839ae8248..c0220c8f010 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go @@ -3,72 +3,73 @@ package internal // import "go.opentelemetry.io/collector/pdata/internal" -import ( - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" -) - -type Value struct { - orig *otlpcommon.AnyValue +type ValueWrapper struct { + orig *AnyValue state *State } -func GetOrigValue(ms Value) *otlpcommon.AnyValue { +func GetValueOrig(ms ValueWrapper) *AnyValue { return ms.orig } -func GetValueState(ms Value) *State { +func GetValueState(ms ValueWrapper) *State { return ms.state } -func NewValue(orig *otlpcommon.AnyValue, state *State) Value { - return Value{orig: orig, state: state} +func NewValueWrapper(orig *AnyValue, state *State) ValueWrapper { + return ValueWrapper{orig: orig, state: state} +} + +func GenTestValueWrapper() ValueWrapper { + orig := GenTestAnyValue() + return NewValueWrapper(orig, NewState()) } -func NewOrigAnyValueStringValue() *otlpcommon.AnyValue_StringValue { +func NewAnyValueStringValue() *AnyValue_StringValue { if !UseProtoPooling.IsEnabled() { - return &otlpcommon.AnyValue_StringValue{} + return &AnyValue_StringValue{} } - return ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue) + return 
ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue) } -func NewOrigAnyValueIntValue() *otlpcommon.AnyValue_IntValue { +func NewAnyValueIntValue() *AnyValue_IntValue { if !UseProtoPooling.IsEnabled() { - return &otlpcommon.AnyValue_IntValue{} + return &AnyValue_IntValue{} } - return ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue) + return ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue) } -func NewOrigAnyValueBoolValue() *otlpcommon.AnyValue_BoolValue { +func NewAnyValueBoolValue() *AnyValue_BoolValue { if !UseProtoPooling.IsEnabled() { - return &otlpcommon.AnyValue_BoolValue{} + return &AnyValue_BoolValue{} } - return ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue) + return ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue) } -func NewOrigAnyValueDoubleValue() *otlpcommon.AnyValue_DoubleValue { +func NewAnyValueDoubleValue() *AnyValue_DoubleValue { if !UseProtoPooling.IsEnabled() { - return &otlpcommon.AnyValue_DoubleValue{} + return &AnyValue_DoubleValue{} } - return ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue) + return ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue) } -func NewOrigAnyValueBytesValue() *otlpcommon.AnyValue_BytesValue { +func NewAnyValueBytesValue() *AnyValue_BytesValue { if !UseProtoPooling.IsEnabled() { - return &otlpcommon.AnyValue_BytesValue{} + return &AnyValue_BytesValue{} } - return ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue) + return ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue) } -func NewOrigAnyValueArrayValue() *otlpcommon.AnyValue_ArrayValue { +func NewAnyValueArrayValue() *AnyValue_ArrayValue { if !UseProtoPooling.IsEnabled() { - return &otlpcommon.AnyValue_ArrayValue{} + return &AnyValue_ArrayValue{} } - return ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue) + return ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue) } -func NewOrigAnyValueKvlistValue() *otlpcommon.AnyValue_KvlistValue { +func NewAnyValueKvlistValue() *AnyValue_KvlistValue { if !UseProtoPooling.IsEnabled() { - return &otlpcommon.AnyValue_KvlistValue{} + return &AnyValue_KvlistValue{} } - return ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue) + return ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go index 8cdedc2efb6..073b55d3416 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go @@ -18,31 +18,31 @@ import ( // // Must use NewByteSlice function to create new instances. // Important: zero-initialized instance is not valid for use. -type ByteSlice internal.ByteSlice +type ByteSlice internal.ByteSliceWrapper func (ms ByteSlice) getOrig() *[]byte { - return internal.GetOrigByteSlice(internal.ByteSlice(ms)) + return internal.GetByteSliceOrig(internal.ByteSliceWrapper(ms)) } func (ms ByteSlice) getState() *internal.State { - return internal.GetByteSliceState(internal.ByteSlice(ms)) + return internal.GetByteSliceState(internal.ByteSliceWrapper(ms)) } // NewByteSlice creates a new empty ByteSlice. func NewByteSlice() ByteSlice { orig := []byte(nil) - return ByteSlice(internal.NewByteSlice(&orig, internal.NewState())) + return ByteSlice(internal.NewByteSliceWrapper(&orig, internal.NewState())) } // AsRaw returns a copy of the []byte slice. 
func (ms ByteSlice) AsRaw() []byte { - return internal.CopyOrigByteSlice(nil, *ms.getOrig()) + return copyByteSlice(nil, *ms.getOrig()) } // FromRaw copies raw []byte into the slice ByteSlice. func (ms ByteSlice) FromRaw(val []byte) { ms.getState().AssertMutable() - *ms.getOrig() = internal.CopyOrigByteSlice(*ms.getOrig(), val) + *ms.getOrig() = copyByteSlice(*ms.getOrig(), val) } // Len returns length of the []byte slice value. @@ -155,10 +155,14 @@ func (ms ByteSlice) CopyTo(dest ByteSlice) { if ms.getOrig() == dest.getOrig() { return } - *dest.getOrig() = internal.CopyOrigByteSlice(*dest.getOrig(), *ms.getOrig()) + *dest.getOrig() = copyByteSlice(*dest.getOrig(), *ms.getOrig()) } // Equal checks equality with another ByteSlice func (ms ByteSlice) Equal(val ByteSlice) bool { return slices.Equal(*ms.getOrig(), *val.getOrig()) } + +func copyByteSlice(dst, src []byte) []byte { + return append(dst[:0], src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go index 0efbfd0f5b9..dac9ebc4edf 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go @@ -18,31 +18,31 @@ import ( // // Must use NewFloat64Slice function to create new instances. // Important: zero-initialized instance is not valid for use. -type Float64Slice internal.Float64Slice +type Float64Slice internal.Float64SliceWrapper func (ms Float64Slice) getOrig() *[]float64 { - return internal.GetOrigFloat64Slice(internal.Float64Slice(ms)) + return internal.GetFloat64SliceOrig(internal.Float64SliceWrapper(ms)) } func (ms Float64Slice) getState() *internal.State { - return internal.GetFloat64SliceState(internal.Float64Slice(ms)) + return internal.GetFloat64SliceState(internal.Float64SliceWrapper(ms)) } // NewFloat64Slice creates a new empty Float64Slice. func NewFloat64Slice() Float64Slice { orig := []float64(nil) - return Float64Slice(internal.NewFloat64Slice(&orig, internal.NewState())) + return Float64Slice(internal.NewFloat64SliceWrapper(&orig, internal.NewState())) } // AsRaw returns a copy of the []float64 slice. func (ms Float64Slice) AsRaw() []float64 { - return internal.CopyOrigFloat64Slice(nil, *ms.getOrig()) + return copyFloat64Slice(nil, *ms.getOrig()) } // FromRaw copies raw []float64 into the slice Float64Slice. func (ms Float64Slice) FromRaw(val []float64) { ms.getState().AssertMutable() - *ms.getOrig() = internal.CopyOrigFloat64Slice(*ms.getOrig(), val) + *ms.getOrig() = copyFloat64Slice(*ms.getOrig(), val) } // Len returns length of the []float64 slice value. @@ -155,10 +155,14 @@ func (ms Float64Slice) CopyTo(dest Float64Slice) { if ms.getOrig() == dest.getOrig() { return } - *dest.getOrig() = internal.CopyOrigFloat64Slice(*dest.getOrig(), *ms.getOrig()) + *dest.getOrig() = copyFloat64Slice(*dest.getOrig(), *ms.getOrig()) } // Equal checks equality with another Float64Slice func (ms Float64Slice) Equal(val Float64Slice) bool { return slices.Equal(*ms.getOrig(), *val.getOrig()) } + +func copyFloat64Slice(dst, src []float64) []float64 { + return append(dst[:0], src...) 
+} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go index b4385a0884e..6eeeab6e3cb 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go @@ -8,7 +8,6 @@ package pcommon import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" ) // InstrumentationScope is a message representing the instrumentation scope information. @@ -18,10 +17,10 @@ import ( // // Must use NewInstrumentationScope function to create new instances. // Important: zero-initialized instance is not valid for use. -type InstrumentationScope internal.InstrumentationScope +type InstrumentationScope internal.InstrumentationScopeWrapper -func newInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *internal.State) InstrumentationScope { - return InstrumentationScope(internal.NewInstrumentationScope(orig, state)) +func newInstrumentationScope(orig *internal.InstrumentationScope, state *internal.State) InstrumentationScope { + return InstrumentationScope(internal.NewInstrumentationScopeWrapper(orig, state)) } // NewInstrumentationScope creates a new empty InstrumentationScope. @@ -29,7 +28,7 @@ func newInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *inter // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewInstrumentationScope() InstrumentationScope { - return newInstrumentationScope(internal.NewOrigInstrumentationScope(), internal.NewState()) + return newInstrumentationScope(internal.NewInstrumentationScope(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -41,7 +40,7 @@ func (ms InstrumentationScope) MoveTo(dest InstrumentationScope) { if ms.getOrig() == dest.getOrig() { return } - internal.DeleteOrigInstrumentationScope(dest.getOrig(), false) + internal.DeleteInstrumentationScope(dest.getOrig(), false) *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig() } @@ -69,7 +68,7 @@ func (ms InstrumentationScope) SetVersion(v string) { // Attributes returns the Attributes associated with this InstrumentationScope. func (ms InstrumentationScope) Attributes() Map { - return Map(internal.NewMap(&ms.getOrig().Attributes, ms.getState())) + return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState())) } // DroppedAttributesCount returns the droppedattributescount associated with this InstrumentationScope. @@ -86,13 +85,13 @@ func (ms InstrumentationScope) SetDroppedAttributesCount(v uint32) { // CopyTo copies all properties from the current struct overriding the destination. 
func (ms InstrumentationScope) CopyTo(dest InstrumentationScope) { dest.getState().AssertMutable() - internal.CopyOrigInstrumentationScope(dest.getOrig(), ms.getOrig()) + internal.CopyInstrumentationScope(dest.getOrig(), ms.getOrig()) } -func (ms InstrumentationScope) getOrig() *otlpcommon.InstrumentationScope { - return internal.GetOrigInstrumentationScope(internal.InstrumentationScope(ms)) +func (ms InstrumentationScope) getOrig() *internal.InstrumentationScope { + return internal.GetInstrumentationScopeOrig(internal.InstrumentationScopeWrapper(ms)) } func (ms InstrumentationScope) getState() *internal.State { - return internal.GetInstrumentationScopeState(internal.InstrumentationScope(ms)) + return internal.GetInstrumentationScopeState(internal.InstrumentationScopeWrapper(ms)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go index 6cf7e2822fb..d2bb746df53 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go @@ -18,31 +18,31 @@ import ( // // Must use NewInt32Slice function to create new instances. // Important: zero-initialized instance is not valid for use. -type Int32Slice internal.Int32Slice +type Int32Slice internal.Int32SliceWrapper func (ms Int32Slice) getOrig() *[]int32 { - return internal.GetOrigInt32Slice(internal.Int32Slice(ms)) + return internal.GetInt32SliceOrig(internal.Int32SliceWrapper(ms)) } func (ms Int32Slice) getState() *internal.State { - return internal.GetInt32SliceState(internal.Int32Slice(ms)) + return internal.GetInt32SliceState(internal.Int32SliceWrapper(ms)) } // NewInt32Slice creates a new empty Int32Slice. func NewInt32Slice() Int32Slice { orig := []int32(nil) - return Int32Slice(internal.NewInt32Slice(&orig, internal.NewState())) + return Int32Slice(internal.NewInt32SliceWrapper(&orig, internal.NewState())) } // AsRaw returns a copy of the []int32 slice. func (ms Int32Slice) AsRaw() []int32 { - return internal.CopyOrigInt32Slice(nil, *ms.getOrig()) + return copyInt32Slice(nil, *ms.getOrig()) } // FromRaw copies raw []int32 into the slice Int32Slice. func (ms Int32Slice) FromRaw(val []int32) { ms.getState().AssertMutable() - *ms.getOrig() = internal.CopyOrigInt32Slice(*ms.getOrig(), val) + *ms.getOrig() = copyInt32Slice(*ms.getOrig(), val) } // Len returns length of the []int32 slice value. @@ -155,10 +155,14 @@ func (ms Int32Slice) CopyTo(dest Int32Slice) { if ms.getOrig() == dest.getOrig() { return } - *dest.getOrig() = internal.CopyOrigInt32Slice(*dest.getOrig(), *ms.getOrig()) + *dest.getOrig() = copyInt32Slice(*dest.getOrig(), *ms.getOrig()) } // Equal checks equality with another Int32Slice func (ms Int32Slice) Equal(val Int32Slice) bool { return slices.Equal(*ms.getOrig(), *val.getOrig()) } + +func copyInt32Slice(dst, src []int32) []int32 { + return append(dst[:0], src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go index f38f92baec4..4c22f2ed825 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go @@ -18,31 +18,31 @@ import ( // // Must use NewInt64Slice function to create new instances. // Important: zero-initialized instance is not valid for use. 
-type Int64Slice internal.Int64Slice +type Int64Slice internal.Int64SliceWrapper func (ms Int64Slice) getOrig() *[]int64 { - return internal.GetOrigInt64Slice(internal.Int64Slice(ms)) + return internal.GetInt64SliceOrig(internal.Int64SliceWrapper(ms)) } func (ms Int64Slice) getState() *internal.State { - return internal.GetInt64SliceState(internal.Int64Slice(ms)) + return internal.GetInt64SliceState(internal.Int64SliceWrapper(ms)) } // NewInt64Slice creates a new empty Int64Slice. func NewInt64Slice() Int64Slice { orig := []int64(nil) - return Int64Slice(internal.NewInt64Slice(&orig, internal.NewState())) + return Int64Slice(internal.NewInt64SliceWrapper(&orig, internal.NewState())) } // AsRaw returns a copy of the []int64 slice. func (ms Int64Slice) AsRaw() []int64 { - return internal.CopyOrigInt64Slice(nil, *ms.getOrig()) + return copyInt64Slice(nil, *ms.getOrig()) } // FromRaw copies raw []int64 into the slice Int64Slice. func (ms Int64Slice) FromRaw(val []int64) { ms.getState().AssertMutable() - *ms.getOrig() = internal.CopyOrigInt64Slice(*ms.getOrig(), val) + *ms.getOrig() = copyInt64Slice(*ms.getOrig(), val) } // Len returns length of the []int64 slice value. @@ -155,10 +155,14 @@ func (ms Int64Slice) CopyTo(dest Int64Slice) { if ms.getOrig() == dest.getOrig() { return } - *dest.getOrig() = internal.CopyOrigInt64Slice(*dest.getOrig(), *ms.getOrig()) + *dest.getOrig() = copyInt64Slice(*dest.getOrig(), *ms.getOrig()) } // Equal checks equality with another Int64Slice func (ms Int64Slice) Equal(val Int64Slice) bool { return slices.Equal(*ms.getOrig(), *val.getOrig()) } + +func copyInt64Slice(dst, src []int64) []int64 { + return append(dst[:0], src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go index 66cd5156d27..4f767693b72 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go @@ -8,7 +8,6 @@ package pcommon import ( "go.opentelemetry.io/collector/pdata/internal" - otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" ) // Resource is a message representing the resource information. @@ -18,10 +17,10 @@ import ( // // Must use NewResource function to create new instances. // Important: zero-initialized instance is not valid for use. -type Resource internal.Resource +type Resource internal.ResourceWrapper -func newResource(orig *otlpresource.Resource, state *internal.State) Resource { - return Resource(internal.NewResource(orig, state)) +func newResource(orig *internal.Resource, state *internal.State) Resource { + return Resource(internal.NewResourceWrapper(orig, state)) } // NewResource creates a new empty Resource. @@ -29,7 +28,7 @@ func newResource(orig *otlpresource.Resource, state *internal.State) Resource { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewResource() Resource { - return newResource(internal.NewOrigResource(), internal.NewState()) + return newResource(internal.NewResource(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -41,13 +40,13 @@ func (ms Resource) MoveTo(dest Resource) { if ms.getOrig() == dest.getOrig() { return } - internal.DeleteOrigResource(dest.getOrig(), false) + internal.DeleteResource(dest.getOrig(), false) *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig() } // Attributes returns the Attributes associated with this Resource. func (ms Resource) Attributes() Map { - return Map(internal.NewMap(&ms.getOrig().Attributes, ms.getState())) + return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState())) } // DroppedAttributesCount returns the droppedattributescount associated with this Resource. @@ -64,13 +63,13 @@ func (ms Resource) SetDroppedAttributesCount(v uint32) { // CopyTo copies all properties from the current struct overriding the destination. func (ms Resource) CopyTo(dest Resource) { dest.getState().AssertMutable() - internal.CopyOrigResource(dest.getOrig(), ms.getOrig()) + internal.CopyResource(dest.getOrig(), ms.getOrig()) } -func (ms Resource) getOrig() *otlpresource.Resource { - return internal.GetOrigResource(internal.Resource(ms)) +func (ms Resource) getOrig() *internal.Resource { + return internal.GetResourceOrig(internal.ResourceWrapper(ms)) } func (ms Resource) getState() *internal.State { - return internal.GetResourceState(internal.Resource(ms)) + return internal.GetResourceState(internal.ResourceWrapper(ms)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go index c60caaa33c1..0a28bde3af6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go @@ -10,7 +10,6 @@ import ( "iter" "go.opentelemetry.io/collector/pdata/internal" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" ) // Slice logically represents a slice of Value. @@ -20,16 +19,16 @@ import ( // // Must use NewSlice function to create new instances. // Important: zero-initialized instance is not valid for use. -type Slice internal.Slice +type Slice internal.SliceWrapper -func newSlice(orig *[]otlpcommon.AnyValue, state *internal.State) Slice { - return Slice(internal.NewSlice(orig, state)) +func newSlice(orig *[]internal.AnyValue, state *internal.State) Slice { + return Slice(internal.NewSliceWrapper(orig, state)) } -// NewSlice creates a Slice with 0 elements. +// NewSlice creates a SliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewSlice() Slice { - orig := []otlpcommon.AnyValue(nil) + orig := []internal.AnyValue(nil) return newSlice(&orig, internal.NewState()) } @@ -86,7 +85,7 @@ func (es Slice) EnsureCapacity(newCap int) { return } - newOrig := make([]otlpcommon.AnyValue, len(*es.getOrig()), newCap) + newOrig := make([]internal.AnyValue, len(*es.getOrig()), newCap) copy(newOrig, *es.getOrig()) *es.getOrig() = newOrig } @@ -95,7 +94,7 @@ func (es Slice) EnsureCapacity(newCap int) { // It returns the newly added Value. 
func (es Slice) AppendEmpty() Value { es.getState().AssertMutable() - *es.getOrig() = append(*es.getOrig(), otlpcommon.AnyValue{}) + *es.getOrig() = append(*es.getOrig(), internal.AnyValue{}) return es.At(es.Len() - 1) } @@ -124,7 +123,7 @@ func (es Slice) RemoveIf(f func(Value) bool) { newLen := 0 for i := 0; i < len(*es.getOrig()); i++ { if f(es.At(i)) { - internal.DeleteOrigAnyValue(&(*es.getOrig())[i], false) + internal.DeleteAnyValue(&(*es.getOrig())[i], false) continue } if newLen == i { @@ -145,13 +144,13 @@ func (es Slice) CopyTo(dest Slice) { if es.getOrig() == dest.getOrig() { return } - *dest.getOrig() = internal.CopyOrigAnyValueSlice(*dest.getOrig(), *es.getOrig()) + *dest.getOrig() = internal.CopyAnyValueSlice(*dest.getOrig(), *es.getOrig()) } -func (ms Slice) getOrig() *[]otlpcommon.AnyValue { - return internal.GetOrigSlice(internal.Slice(ms)) +func (ms Slice) getOrig() *[]internal.AnyValue { + return internal.GetSliceOrig(internal.SliceWrapper(ms)) } func (ms Slice) getState() *internal.State { - return internal.GetSliceState(internal.Slice(ms)) + return internal.GetSliceState(internal.SliceWrapper(ms)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go index a38c9b180e8..ff8422805d8 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go @@ -18,31 +18,31 @@ import ( // // Must use NewStringSlice function to create new instances. // Important: zero-initialized instance is not valid for use. -type StringSlice internal.StringSlice +type StringSlice internal.StringSliceWrapper func (ms StringSlice) getOrig() *[]string { - return internal.GetOrigStringSlice(internal.StringSlice(ms)) + return internal.GetStringSliceOrig(internal.StringSliceWrapper(ms)) } func (ms StringSlice) getState() *internal.State { - return internal.GetStringSliceState(internal.StringSlice(ms)) + return internal.GetStringSliceState(internal.StringSliceWrapper(ms)) } // NewStringSlice creates a new empty StringSlice. func NewStringSlice() StringSlice { orig := []string(nil) - return StringSlice(internal.NewStringSlice(&orig, internal.NewState())) + return StringSlice(internal.NewStringSliceWrapper(&orig, internal.NewState())) } // AsRaw returns a copy of the []string slice. func (ms StringSlice) AsRaw() []string { - return internal.CopyOrigStringSlice(nil, *ms.getOrig()) + return copyStringSlice(nil, *ms.getOrig()) } // FromRaw copies raw []string into the slice StringSlice. func (ms StringSlice) FromRaw(val []string) { ms.getState().AssertMutable() - *ms.getOrig() = internal.CopyOrigStringSlice(*ms.getOrig(), val) + *ms.getOrig() = copyStringSlice(*ms.getOrig(), val) } // Len returns length of the []string slice value. @@ -155,10 +155,14 @@ func (ms StringSlice) CopyTo(dest StringSlice) { if ms.getOrig() == dest.getOrig() { return } - *dest.getOrig() = internal.CopyOrigStringSlice(*dest.getOrig(), *ms.getOrig()) + *dest.getOrig() = copyStringSlice(*dest.getOrig(), *ms.getOrig()) } // Equal checks equality with another StringSlice func (ms StringSlice) Equal(val StringSlice) bool { return slices.Equal(*ms.getOrig(), *val.getOrig()) } + +func copyStringSlice(dst, src []string) []string { + return append(dst[:0], src...) 
+} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go index 0ea448a2b41..22539d9875e 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go @@ -18,31 +18,31 @@ import ( // // Must use NewUInt64Slice function to create new instances. // Important: zero-initialized instance is not valid for use. -type UInt64Slice internal.UInt64Slice +type UInt64Slice internal.UInt64SliceWrapper func (ms UInt64Slice) getOrig() *[]uint64 { - return internal.GetOrigUInt64Slice(internal.UInt64Slice(ms)) + return internal.GetUInt64SliceOrig(internal.UInt64SliceWrapper(ms)) } func (ms UInt64Slice) getState() *internal.State { - return internal.GetUInt64SliceState(internal.UInt64Slice(ms)) + return internal.GetUInt64SliceState(internal.UInt64SliceWrapper(ms)) } // NewUInt64Slice creates a new empty UInt64Slice. func NewUInt64Slice() UInt64Slice { orig := []uint64(nil) - return UInt64Slice(internal.NewUInt64Slice(&orig, internal.NewState())) + return UInt64Slice(internal.NewUInt64SliceWrapper(&orig, internal.NewState())) } // AsRaw returns a copy of the []uint64 slice. func (ms UInt64Slice) AsRaw() []uint64 { - return internal.CopyOrigUint64Slice(nil, *ms.getOrig()) + return copyUint64Slice(nil, *ms.getOrig()) } // FromRaw copies raw []uint64 into the slice UInt64Slice. func (ms UInt64Slice) FromRaw(val []uint64) { ms.getState().AssertMutable() - *ms.getOrig() = internal.CopyOrigUint64Slice(*ms.getOrig(), val) + *ms.getOrig() = copyUint64Slice(*ms.getOrig(), val) } // Len returns length of the []uint64 slice value. @@ -155,10 +155,14 @@ func (ms UInt64Slice) CopyTo(dest UInt64Slice) { if ms.getOrig() == dest.getOrig() { return } - *dest.getOrig() = internal.CopyOrigUint64Slice(*dest.getOrig(), *ms.getOrig()) + *dest.getOrig() = copyUint64Slice(*dest.getOrig(), *ms.getOrig()) } // Equal checks equality with another UInt64Slice func (ms UInt64Slice) Equal(val UInt64Slice) bool { return slices.Equal(*ms.getOrig(), *val.getOrig()) } + +func copyUint64Slice(dst, src []uint64) []uint64 { + return append(dst[:0], src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go index 9175dd0ea03..b5d94967c28 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go @@ -9,31 +9,30 @@ import ( "go.uber.org/multierr" "go.opentelemetry.io/collector/pdata/internal" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" ) // Map stores a map of string keys to elements of Value type. // // Must use NewMap function to create new instances. // Important: zero-initialized instance is not valid for use. -type Map internal.Map +type Map internal.MapWrapper // NewMap creates a Map with 0 elements. 
func NewMap() Map { - orig := []otlpcommon.KeyValue(nil) - return Map(internal.NewMap(&orig, internal.NewState())) + orig := []internal.KeyValue(nil) + return Map(internal.NewMapWrapper(&orig, internal.NewState())) } -func (m Map) getOrig() *[]otlpcommon.KeyValue { - return internal.GetOrigMap(internal.Map(m)) +func (m Map) getOrig() *[]internal.KeyValue { + return internal.GetMapOrig(internal.MapWrapper(m)) } func (m Map) getState() *internal.State { - return internal.GetMapState(internal.Map(m)) + return internal.GetMapState(internal.MapWrapper(m)) } -func newMap(orig *[]otlpcommon.KeyValue, state *internal.State) Map { - return Map(internal.NewMap(orig, state)) +func newMap(orig *[]internal.KeyValue, state *internal.State) Map { + return Map(internal.NewMapWrapper(orig, state)) } // Clear erases any existing entries in this Map instance. @@ -50,7 +49,7 @@ func (m Map) EnsureCapacity(capacity int) { if capacity <= cap(oldOrig) { return } - *m.getOrig() = make([]otlpcommon.KeyValue, len(oldOrig), capacity) + *m.getOrig() = make([]internal.KeyValue, len(oldOrig), capacity) copy(*m.getOrig(), oldOrig) } @@ -94,7 +93,7 @@ func (m Map) RemoveIf(f func(string, Value) bool) { newLen := 0 for i := 0; i < len(*m.getOrig()); i++ { if f((*m.getOrig())[i].Key, newValue(&(*m.getOrig())[i].Value, m.getState())) { - (*m.getOrig())[i] = otlpcommon.KeyValue{} + (*m.getOrig())[i] = internal.KeyValue{} continue } if newLen == i { @@ -103,7 +102,7 @@ func (m Map) RemoveIf(f func(string, Value) bool) { continue } (*m.getOrig())[newLen] = (*m.getOrig())[i] - (*m.getOrig())[i] = otlpcommon.KeyValue{} + (*m.getOrig())[i] = internal.KeyValue{} newLen++ } *m.getOrig() = (*m.getOrig())[:newLen] @@ -117,7 +116,7 @@ func (m Map) PutEmpty(k string) Value { av.getOrig().Value = nil return newValue(av.getOrig(), m.getState()) } - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k}) + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k}) return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState()) } @@ -129,7 +128,7 @@ func (m Map) GetOrPutEmpty(k string) (Value, bool) { if av, existing := m.Get(k); existing { return av, true } - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k}) + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k}) return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState()), false } @@ -142,9 +141,9 @@ func (m Map) PutStr(k, v string) { av.SetStr(v) return } - ov := internal.NewOrigAnyValueStringValue() + ov := internal.NewAnyValueStringValue() ov.StringValue = v - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}}) + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}}) } // PutInt performs the Insert or Update action. The int Value is @@ -156,9 +155,9 @@ func (m Map) PutInt(k string, v int64) { av.SetInt(v) return } - ov := internal.NewOrigAnyValueIntValue() + ov := internal.NewAnyValueIntValue() ov.IntValue = v - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}}) + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}}) } // PutDouble performs the Insert or Update action. 
The double Value is @@ -170,9 +169,9 @@ func (m Map) PutDouble(k string, v float64) { av.SetDouble(v) return } - ov := internal.NewOrigAnyValueDoubleValue() + ov := internal.NewAnyValueDoubleValue() ov.DoubleValue = v - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}}) + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}}) } // PutBool performs the Insert or Update action. The bool Value is @@ -184,9 +183,9 @@ func (m Map) PutBool(k string, v bool) { av.SetBool(v) return } - ov := internal.NewOrigAnyValueBoolValue() + ov := internal.NewAnyValueBoolValue() ov.BoolValue = v - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}}) + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}}) } // PutEmptyBytes inserts or updates an empty byte slice under given key and returns it. @@ -195,9 +194,9 @@ func (m Map) PutEmptyBytes(k string) ByteSlice { if av, existing := m.Get(k); existing { return av.SetEmptyBytes() } - ov := internal.NewOrigAnyValueBytesValue() - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}}) - return ByteSlice(internal.NewByteSlice(&ov.BytesValue, m.getState())) + ov := internal.NewAnyValueBytesValue() + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}}) + return ByteSlice(internal.NewByteSliceWrapper(&ov.BytesValue, m.getState())) } // PutEmptyMap inserts or updates an empty map under given key and returns it. @@ -206,10 +205,10 @@ func (m Map) PutEmptyMap(k string) Map { if av, existing := m.Get(k); existing { return av.SetEmptyMap() } - ov := internal.NewOrigAnyValueKvlistValue() - ov.KvlistValue = internal.NewOrigKeyValueList() - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}}) - return Map(internal.NewMap(&ov.KvlistValue.Values, m.getState())) + ov := internal.NewAnyValueKvlistValue() + ov.KvlistValue = internal.NewKeyValueList() + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}}) + return Map(internal.NewMapWrapper(&ov.KvlistValue.Values, m.getState())) } // PutEmptySlice inserts or updates an empty slice under given key and returns it. @@ -218,10 +217,10 @@ func (m Map) PutEmptySlice(k string) Slice { if av, existing := m.Get(k); existing { return av.SetEmptySlice() } - ov := internal.NewOrigAnyValueArrayValue() - ov.ArrayValue = internal.NewOrigArrayValue() - *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}}) - return Slice(internal.NewSlice(&ov.ArrayValue.Values, m.getState())) + ov := internal.NewAnyValueArrayValue() + ov.ArrayValue = internal.NewArrayValue() + *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}}) + return Slice(internal.NewSliceWrapper(&ov.ArrayValue.Values, m.getState())) } // Len returns the length of this map. 
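(Editorial aside, not part of the vendored diff: the map.go hunks above only re-plumb pcommon.Map onto the flattened internal types; the exported Put*/Get surface is unchanged. A minimal usage sketch of that surface, with illustrative keys and values:)

	m := pcommon.NewMap()
	m.PutStr("service.name", "checkout") // stored as an internal.AnyValue_StringValue
	m.PutInt("retry.count", 3)
	tags := m.PutEmptySlice("tags") // backed by an internal.AnyValue_ArrayValue
	tags.AppendEmpty().SetStr("beta")
	if v, ok := m.Get("service.name"); ok {
		_ = v.Str() // "checkout"
	}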
@@ -242,7 +241,7 @@ func (m Map) Len() int { func (m Map) Range(f func(k string, v Value) bool) { for i := range *m.getOrig() { kv := &(*m.getOrig())[i] - if !f(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) { + if !f(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) { break } } @@ -257,7 +256,7 @@ func (m Map) All() iter.Seq2[string, Value] { return func(yield func(string, Value) bool) { for i := range *m.getOrig() { kv := &(*m.getOrig())[i] - if !yield(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) { + if !yield(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) { return } } @@ -283,7 +282,7 @@ func (m Map) CopyTo(dest Map) { if m.getOrig() == dest.getOrig() { return } - *dest.getOrig() = internal.CopyOrigKeyValueSlice(*dest.getOrig(), *m.getOrig()) + *dest.getOrig() = internal.CopyKeyValueSlice(*dest.getOrig(), *m.getOrig()) } // AsRaw returns a standard go map representation of this Map. @@ -305,7 +304,7 @@ func (m Map) FromRaw(rawMap map[string]any) error { } var errs error - origs := make([]otlpcommon.KeyValue, len(rawMap)) + origs := make([]internal.KeyValue, len(rawMap)) ix := 0 for k, iv := range rawMap { origs[ix].Key = k diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go index d4276a1d86b..380fc5c1b61 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go @@ -6,7 +6,7 @@ package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" import ( "go.uber.org/multierr" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + "go.opentelemetry.io/collector/pdata/internal" ) // AsRaw return []any copy of the Slice. @@ -26,7 +26,7 @@ func (es Slice) FromRaw(rawSlice []any) error { return nil } var errs error - origs := make([]otlpcommon.AnyValue, len(rawSlice)) + origs := make([]internal.AnyValue, len(rawSlice)) for ix, iv := range rawSlice { errs = multierr.Append(errs, newValue(&origs[ix], es.getState()).FromRaw(iv)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go index 63399cb58ca..853c1a2d017 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go @@ -5,7 +5,7 @@ package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" import ( "encoding/hex" - "go.opentelemetry.io/collector/pdata/internal/data" + "go.opentelemetry.io/collector/pdata/internal" ) var emptySpanID = SpanID([8]byte{}) @@ -32,5 +32,5 @@ func (ms SpanID) String() string { // IsEmpty returns true if id doesn't contain at least one non-zero byte. func (ms SpanID) IsEmpty() bool { - return data.SpanID(ms).IsEmpty() + return internal.SpanID(ms).IsEmpty() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go index 037213a0caf..5fd1758b1be 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go @@ -13,13 +13,11 @@ type Timestamp uint64 // NewTimestampFromTime constructs a new Timestamp from the provided time.Time. func NewTimestampFromTime(t time.Time) Timestamp { - //nolint:gosec return Timestamp(uint64(t.UnixNano())) } // AsTime converts this to a time.Time. 
func (ts Timestamp) AsTime() time.Time { - //nolint:gosec return time.Unix(0, int64(ts)).UTC() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go index a78f4ff0c70..167f7a3278c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go @@ -11,18 +11,18 @@ import ( // // Must use NewTraceState function to create new instances. // Important: zero-initialized instance is not valid for use. -type TraceState internal.TraceState +type TraceState internal.TraceStateWrapper func NewTraceState() TraceState { - return TraceState(internal.NewTraceState(new(string), internal.NewState())) + return TraceState(internal.NewTraceStateWrapper(new(string), internal.NewState())) } func (ms TraceState) getOrig() *string { - return internal.GetOrigTraceState(internal.TraceState(ms)) + return internal.GetTraceStateOrig(internal.TraceStateWrapper(ms)) } func (ms TraceState) getState() *internal.State { - return internal.GetTraceStateState(internal.TraceState(ms)) + return internal.GetTraceStateState(internal.TraceStateWrapper(ms)) } // AsRaw returns the string representation of the tracestate in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go index 22ad5a5af4e..fd1df45d2c2 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go @@ -6,7 +6,7 @@ package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" import ( "encoding/hex" - "go.opentelemetry.io/collector/pdata/internal/data" + "go.opentelemetry.io/collector/pdata/internal" ) var emptyTraceID = TraceID([16]byte{}) @@ -33,5 +33,5 @@ func (ms TraceID) String() string { // IsEmpty returns true if id doesn't contain at least one non-zero byte. func (ms TraceID) IsEmpty() bool { - return data.TraceID(ms).IsEmpty() + return internal.TraceID(ms).IsEmpty() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go index ad16e6173a5..3c39e704ed9 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go @@ -11,7 +11,6 @@ import ( "strconv" "go.opentelemetry.io/collector/pdata/internal" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" ) // ValueType specifies the type of Value. @@ -67,85 +66,85 @@ func (avt ValueType) String() string { // // Important: zero-initialized instance is not valid for use. All Value functions below must // be called only on instances that are created via NewValue+ functions. -type Value internal.Value +type Value internal.ValueWrapper // NewValueEmpty creates a new Value with an empty value. func NewValueEmpty() Value { - return newValue(&otlpcommon.AnyValue{}, internal.NewState()) + return newValue(&internal.AnyValue{}, internal.NewState()) } // NewValueStr creates a new Value with the given string value. 
func NewValueStr(v string) Value { - ov := internal.NewOrigAnyValueStringValue() + ov := internal.NewAnyValueStringValue() ov.StringValue = v - orig := internal.NewOrigAnyValue() + orig := internal.NewAnyValue() orig.Value = ov return newValue(orig, internal.NewState()) } // NewValueInt creates a new Value with the given int64 value. func NewValueInt(v int64) Value { - ov := internal.NewOrigAnyValueIntValue() + ov := internal.NewAnyValueIntValue() ov.IntValue = v - orig := internal.NewOrigAnyValue() + orig := internal.NewAnyValue() orig.Value = ov return newValue(orig, internal.NewState()) } // NewValueDouble creates a new Value with the given float64 value. func NewValueDouble(v float64) Value { - ov := internal.NewOrigAnyValueDoubleValue() + ov := internal.NewAnyValueDoubleValue() ov.DoubleValue = v - orig := internal.NewOrigAnyValue() + orig := internal.NewAnyValue() orig.Value = ov return newValue(orig, internal.NewState()) } // NewValueBool creates a new Value with the given bool value. func NewValueBool(v bool) Value { - ov := internal.NewOrigAnyValueBoolValue() + ov := internal.NewAnyValueBoolValue() ov.BoolValue = v - orig := internal.NewOrigAnyValue() + orig := internal.NewAnyValue() orig.Value = ov return newValue(orig, internal.NewState()) } // NewValueMap creates a new Value of map type. func NewValueMap() Value { - ov := internal.NewOrigAnyValueKvlistValue() - ov.KvlistValue = internal.NewOrigKeyValueList() - orig := internal.NewOrigAnyValue() + ov := internal.NewAnyValueKvlistValue() + ov.KvlistValue = internal.NewKeyValueList() + orig := internal.NewAnyValue() orig.Value = ov return newValue(orig, internal.NewState()) } // NewValueSlice creates a new Value of array type. func NewValueSlice() Value { - ov := internal.NewOrigAnyValueArrayValue() - ov.ArrayValue = internal.NewOrigArrayValue() - orig := internal.NewOrigAnyValue() + ov := internal.NewAnyValueArrayValue() + ov.ArrayValue = internal.NewArrayValue() + orig := internal.NewAnyValue() orig.Value = ov return newValue(orig, internal.NewState()) } // NewValueBytes creates a new empty Value of byte type. func NewValueBytes() Value { - ov := internal.NewOrigAnyValueBytesValue() - orig := internal.NewOrigAnyValue() + ov := internal.NewAnyValueBytesValue() + orig := internal.NewAnyValue() orig.Value = ov return newValue(orig, internal.NewState()) } -func newValue(orig *otlpcommon.AnyValue, state *internal.State) Value { - return Value(internal.NewValue(orig, state)) +func newValue(orig *internal.AnyValue, state *internal.State) Value { + return Value(internal.NewValueWrapper(orig, state)) } -func (v Value) getOrig() *otlpcommon.AnyValue { - return internal.GetOrigValue(internal.Value(v)) +func (v Value) getOrig() *internal.AnyValue { + return internal.GetValueOrig(internal.ValueWrapper(v)) } func (v Value) getState() *internal.State { - return internal.GetValueState(internal.Value(v)) + return internal.GetValueState(internal.ValueWrapper(v)) } // FromRaw sets the value from the given raw value. @@ -167,7 +166,6 @@ func (v Value) FromRaw(iv any) error { case int64: v.SetInt(tv) case uint: - //nolint:gosec v.SetInt(int64(tv)) case uint8: v.SetInt(int64(tv)) @@ -176,7 +174,6 @@ func (v Value) FromRaw(iv any) error { case uint32: v.SetInt(int64(tv)) case uint64: - //nolint:gosec v.SetInt(int64(tv)) case float32: v.SetDouble(float64(tv)) @@ -200,19 +197,19 @@ func (v Value) FromRaw(iv any) error { // Calling this function on zero-initialized Value will cause a panic. 
func (v Value) Type() ValueType { switch v.getOrig().Value.(type) { - case *otlpcommon.AnyValue_StringValue: + case *internal.AnyValue_StringValue: return ValueTypeStr - case *otlpcommon.AnyValue_BoolValue: + case *internal.AnyValue_BoolValue: return ValueTypeBool - case *otlpcommon.AnyValue_IntValue: + case *internal.AnyValue_IntValue: return ValueTypeInt - case *otlpcommon.AnyValue_DoubleValue: + case *internal.AnyValue_DoubleValue: return ValueTypeDouble - case *otlpcommon.AnyValue_KvlistValue: + case *internal.AnyValue_KvlistValue: return ValueTypeMap - case *otlpcommon.AnyValue_ArrayValue: + case *internal.AnyValue_ArrayValue: return ValueTypeSlice - case *otlpcommon.AnyValue_BytesValue: + case *internal.AnyValue_BytesValue: return ValueTypeBytes } return ValueTypeEmpty @@ -251,7 +248,7 @@ func (v Value) Map() Map { if kvlist == nil { return Map{} } - return newMap(&kvlist.Values, internal.GetValueState(internal.Value(v))) + return newMap(&kvlist.Values, internal.GetValueState(internal.ValueWrapper(v))) } // Slice returns the slice value associated with this Value. @@ -262,18 +259,18 @@ func (v Value) Slice() Slice { if arr == nil { return Slice{} } - return newSlice(&arr.Values, internal.GetValueState(internal.Value(v))) + return newSlice(&arr.Values, internal.GetValueState(internal.ValueWrapper(v))) } // Bytes returns the ByteSlice value associated with this Value. // If the function is called on zero-initialized Value or if the Type() is not ValueTypeBytes // then returns an invalid ByteSlice object. Note that using such slice can cause panic. func (v Value) Bytes() ByteSlice { - bv, ok := v.getOrig().GetValue().(*otlpcommon.AnyValue_BytesValue) + bv, ok := v.getOrig().GetValue().(*internal.AnyValue_BytesValue) if !ok { return ByteSlice{} } - return ByteSlice(internal.NewByteSlice(&bv.BytesValue, internal.GetValueState(internal.Value(v)))) + return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, internal.GetValueState(internal.ValueWrapper(v)))) } // SetStr replaces the string value associated with this Value, @@ -284,8 +281,8 @@ func (v Value) Bytes() ByteSlice { func (v Value) SetStr(sv string) { v.getState().AssertMutable() // Delete everything but the AnyValue object itself. - internal.DeleteOrigAnyValue(v.getOrig(), false) - ov := internal.NewOrigAnyValueStringValue() + internal.DeleteAnyValue(v.getOrig(), false) + ov := internal.NewAnyValueStringValue() ov.StringValue = sv v.getOrig().Value = ov } @@ -296,8 +293,8 @@ func (v Value) SetStr(sv string) { func (v Value) SetInt(iv int64) { v.getState().AssertMutable() // Delete everything but the AnyValue object itself. - internal.DeleteOrigAnyValue(v.getOrig(), false) - ov := internal.NewOrigAnyValueIntValue() + internal.DeleteAnyValue(v.getOrig(), false) + ov := internal.NewAnyValueIntValue() ov.IntValue = iv v.getOrig().Value = ov } @@ -308,8 +305,8 @@ func (v Value) SetInt(iv int64) { func (v Value) SetDouble(dv float64) { v.getState().AssertMutable() // Delete everything but the AnyValue object itself. - internal.DeleteOrigAnyValue(v.getOrig(), false) - ov := internal.NewOrigAnyValueDoubleValue() + internal.DeleteAnyValue(v.getOrig(), false) + ov := internal.NewAnyValueDoubleValue() ov.DoubleValue = dv v.getOrig().Value = ov } @@ -320,8 +317,8 @@ func (v Value) SetDouble(dv float64) { func (v Value) SetBool(bv bool) { v.getState().AssertMutable() // Delete everything but the AnyValue object itself. 
- internal.DeleteOrigAnyValue(v.getOrig(), false) - ov := internal.NewOrigAnyValueBoolValue() + internal.DeleteAnyValue(v.getOrig(), false) + ov := internal.NewAnyValueBoolValue() ov.BoolValue = bv v.getOrig().Value = ov } @@ -331,10 +328,10 @@ func (v Value) SetBool(bv bool) { func (v Value) SetEmptyBytes() ByteSlice { v.getState().AssertMutable() // Delete everything but the AnyValue object itself. - internal.DeleteOrigAnyValue(v.getOrig(), false) - bv := internal.NewOrigAnyValueBytesValue() + internal.DeleteAnyValue(v.getOrig(), false) + bv := internal.NewAnyValueBytesValue() v.getOrig().Value = bv - return ByteSlice(internal.NewByteSlice(&bv.BytesValue, v.getState())) + return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, v.getState())) } // SetEmptyMap sets value to an empty map and returns it. @@ -342,9 +339,9 @@ func (v Value) SetEmptyBytes() ByteSlice { func (v Value) SetEmptyMap() Map { v.getState().AssertMutable() // Delete everything but the AnyValue object itself. - internal.DeleteOrigAnyValue(v.getOrig(), false) - ov := internal.NewOrigAnyValueKvlistValue() - ov.KvlistValue = internal.NewOrigKeyValueList() + internal.DeleteAnyValue(v.getOrig(), false) + ov := internal.NewAnyValueKvlistValue() + ov.KvlistValue = internal.NewKeyValueList() v.getOrig().Value = ov return newMap(&ov.KvlistValue.Values, v.getState()) } @@ -354,9 +351,9 @@ func (v Value) SetEmptyMap() Map { func (v Value) SetEmptySlice() Slice { v.getState().AssertMutable() // Delete everything but the AnyValue object itself. - internal.DeleteOrigAnyValue(v.getOrig(), false) - ov := internal.NewOrigAnyValueArrayValue() - ov.ArrayValue = internal.NewOrigArrayValue() + internal.DeleteAnyValue(v.getOrig(), false) + ov := internal.NewAnyValueArrayValue() + ov.ArrayValue = internal.NewArrayValue() v.getOrig().Value = ov return newSlice(&ov.ArrayValue.Values, v.getState()) } @@ -379,7 +376,7 @@ func (v Value) MoveTo(dest Value) { // Calling this function on zero-initialized Value will cause a panic. func (v Value) CopyTo(dest Value) { dest.getState().AssertMutable() - internal.CopyOrigAnyValue(dest.getOrig(), v.getOrig()) + internal.CopyAnyValue(dest.getOrig(), v.getOrig()) } // AsString converts an OTLP Value object of any type to its equivalent string diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go index 4585c326a1d..80910c903fc 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go @@ -8,8 +8,6 @@ package plog import ( "go.opentelemetry.io/collector/pdata/internal" - "go.opentelemetry.io/collector/pdata/internal/data" - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -21,11 +19,11 @@ import ( // Must use NewLogRecord function to create new instances. // Important: zero-initialized instance is not valid for use. type LogRecord struct { - orig *otlplogs.LogRecord + orig *internal.LogRecord state *internal.State } -func newLogRecord(orig *otlplogs.LogRecord, state *internal.State) LogRecord { +func newLogRecord(orig *internal.LogRecord, state *internal.State) LogRecord { return LogRecord{orig: orig, state: state} } @@ -34,7 +32,7 @@ func newLogRecord(orig *otlplogs.LogRecord, state *internal.State) LogRecord { // This must be used only in testing code. 
Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewLogRecord() LogRecord { - return newLogRecord(internal.NewOrigLogRecord(), internal.NewState()) + return newLogRecord(internal.NewLogRecord(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -46,7 +44,7 @@ func (ms LogRecord) MoveTo(dest LogRecord) { if ms.orig == dest.orig { return } - internal.DeleteOrigLogRecord(dest.orig, false) + internal.DeleteLogRecord(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -80,7 +78,7 @@ func (ms LogRecord) SeverityNumber() SeverityNumber { // SetSeverityNumber replaces the severitynumber associated with this LogRecord. func (ms LogRecord) SetSeverityNumber(v SeverityNumber) { ms.state.AssertMutable() - ms.orig.SeverityNumber = otlplogs.SeverityNumber(v) + ms.orig.SeverityNumber = internal.SeverityNumber(v) } // SeverityText returns the severitytext associated with this LogRecord. @@ -96,12 +94,12 @@ func (ms LogRecord) SetSeverityText(v string) { // Body returns the body associated with this LogRecord. func (ms LogRecord) Body() pcommon.Value { - return pcommon.Value(internal.NewValue(&ms.orig.Body, ms.state)) + return pcommon.Value(internal.NewValueWrapper(&ms.orig.Body, ms.state)) } // Attributes returns the Attributes associated with this LogRecord. func (ms LogRecord) Attributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state)) } // DroppedAttributesCount returns the droppedattributescount associated with this LogRecord. @@ -134,7 +132,7 @@ func (ms LogRecord) TraceID() pcommon.TraceID { // SetTraceID replaces the traceid associated with this LogRecord. func (ms LogRecord) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() - ms.orig.TraceId = data.TraceID(v) + ms.orig.TraceId = internal.TraceID(v) } // SpanID returns the spanid associated with this LogRecord. @@ -145,7 +143,7 @@ func (ms LogRecord) SpanID() pcommon.SpanID { // SetSpanID replaces the spanid associated with this LogRecord. func (ms LogRecord) SetSpanID(v pcommon.SpanID) { ms.state.AssertMutable() - ms.orig.SpanId = data.SpanID(v) + ms.orig.SpanId = internal.SpanID(v) } // EventName returns the eventname associated with this LogRecord. @@ -162,5 +160,5 @@ func (ms LogRecord) SetEventName(v string) { // CopyTo copies all properties from the current struct overriding the destination. func (ms LogRecord) CopyTo(dest LogRecord) { dest.state.AssertMutable() - internal.CopyOrigLogRecord(dest.orig, ms.orig) + internal.CopyLogRecord(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go index cc06afe99be..58751158d7b 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" ) // LogRecordSlice logically represents a slice of LogRecord. @@ -22,18 +21,18 @@ import ( // Must use NewLogRecordSlice function to create new instances. // Important: zero-initialized instance is not valid for use. 
type LogRecordSlice struct { - orig *[]*otlplogs.LogRecord + orig *[]*internal.LogRecord state *internal.State } -func newLogRecordSlice(orig *[]*otlplogs.LogRecord, state *internal.State) LogRecordSlice { +func newLogRecordSlice(orig *[]*internal.LogRecord, state *internal.State) LogRecordSlice { return LogRecordSlice{orig: orig, state: state} } -// NewLogRecordSlice creates a LogRecordSlice with 0 elements. +// NewLogRecordSlice creates a LogRecordSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewLogRecordSlice() LogRecordSlice { - orig := []*otlplogs.LogRecord(nil) + orig := []*internal.LogRecord(nil) return newLogRecordSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es LogRecordSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlplogs.LogRecord, len(*es.orig), newCap) + newOrig := make([]*internal.LogRecord, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es LogRecordSlice) EnsureCapacity(newCap int) { // It returns the newly added LogRecord. func (es LogRecordSlice) AppendEmpty() LogRecord { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigLogRecord()) + *es.orig = append(*es.orig, internal.NewLogRecord()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigLogRecord((*es.orig)[i], true) + internal.DeleteLogRecord((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es LogRecordSlice) CopyTo(dest LogRecordSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigLogRecordSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyLogRecordPtrSlice(*dest.orig, *es.orig) } // Sort sorts the LogRecord elements within LogRecordSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logs.go index 2bd4ee199f2..238ed8764d5 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logs.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logs.go @@ -8,7 +8,6 @@ package plog import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" ) // Logs is the top-level struct that is propagated through the logs pipeline. @@ -19,10 +18,10 @@ import ( // // Must use NewLogs function to create new instances. // Important: zero-initialized instance is not valid for use. -type Logs internal.Logs +type Logs internal.LogsWrapper -func newLogs(orig *otlpcollectorlogs.ExportLogsServiceRequest, state *internal.State) Logs { - return Logs(internal.NewLogs(orig, state)) +func newLogs(orig *internal.ExportLogsServiceRequest, state *internal.State) Logs { + return Logs(internal.NewLogsWrapper(orig, state)) } // NewLogs creates a new empty Logs. @@ -30,7 +29,7 @@ func newLogs(orig *otlpcollectorlogs.ExportLogsServiceRequest, state *internal.S // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewLogs() Logs { - return newLogs(internal.NewOrigExportLogsServiceRequest(), internal.NewState()) + return newLogs(internal.NewExportLogsServiceRequest(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -42,7 +41,7 @@ func (ms Logs) MoveTo(dest Logs) { if ms.getOrig() == dest.getOrig() { return } - internal.DeleteOrigExportLogsServiceRequest(dest.getOrig(), false) + internal.DeleteExportLogsServiceRequest(dest.getOrig(), false) *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig() } @@ -54,13 +53,13 @@ func (ms Logs) ResourceLogs() ResourceLogsSlice { // CopyTo copies all properties from the current struct overriding the destination. func (ms Logs) CopyTo(dest Logs) { dest.getState().AssertMutable() - internal.CopyOrigExportLogsServiceRequest(dest.getOrig(), ms.getOrig()) + internal.CopyExportLogsServiceRequest(dest.getOrig(), ms.getOrig()) } -func (ms Logs) getOrig() *otlpcollectorlogs.ExportLogsServiceRequest { - return internal.GetOrigLogs(internal.Logs(ms)) +func (ms Logs) getOrig() *internal.ExportLogsServiceRequest { + return internal.GetLogsOrig(internal.LogsWrapper(ms)) } func (ms Logs) getState() *internal.State { - return internal.GetLogsState(internal.Logs(ms)) + return internal.GetLogsState(internal.LogsWrapper(ms)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go index 2283d5451b7..b3b64ff4461 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go @@ -8,7 +8,6 @@ package plog import ( "go.opentelemetry.io/collector/pdata/internal" - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewResourceLogs function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceLogs struct { - orig *otlplogs.ResourceLogs + orig *internal.ResourceLogs state *internal.State } -func newResourceLogs(orig *otlplogs.ResourceLogs, state *internal.State) ResourceLogs { +func newResourceLogs(orig *internal.ResourceLogs, state *internal.State) ResourceLogs { return ResourceLogs{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newResourceLogs(orig *otlplogs.ResourceLogs, state *internal.State) Resourc // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewResourceLogs() ResourceLogs { - return newResourceLogs(internal.NewOrigResourceLogs(), internal.NewState()) + return newResourceLogs(internal.NewResourceLogs(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms ResourceLogs) MoveTo(dest ResourceLogs) { if ms.orig == dest.orig { return } - internal.DeleteOrigResourceLogs(dest.orig, false) + internal.DeleteResourceLogs(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Resource returns the resource associated with this ResourceLogs. func (ms ResourceLogs) Resource() pcommon.Resource { - return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) + return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state)) } // ScopeLogs returns the ScopeLogs associated with this ResourceLogs. 
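The plog hunks above keep the exported Logs, ResourceLogs, ScopeLogs, and LogRecord surface intact; only the internal orig types change. A minimal sketch of assembling that hierarchy through the public API (all names and values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	rl := ld.ResourceLogs().AppendEmpty()
	rl.Resource().Attributes().PutStr("service.name", "demo")

	sl := rl.ScopeLogs().AppendEmpty()
	sl.Scope().SetName("example/manual")

	lr := sl.LogRecords().AppendEmpty()
	lr.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	lr.SetSeverityNumber(plog.SeverityNumberInfo)
	lr.SetSeverityText("INFO")
	lr.Body().SetStr("user logged in") // Body is the pcommon.Value accessor shown above
	lr.Attributes().PutStr("user.id", "42")

	fmt.Println(ld.LogRecordCount()) // 1
}
```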
@@ -73,5 +72,5 @@ func (ms ResourceLogs) SetSchemaUrl(v string) { // CopyTo copies all properties from the current struct overriding the destination. func (ms ResourceLogs) CopyTo(dest ResourceLogs) { dest.state.AssertMutable() - internal.CopyOrigResourceLogs(dest.orig, ms.orig) + internal.CopyResourceLogs(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go index eb358473850..d730aed983a 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" ) // ResourceLogsSlice logically represents a slice of ResourceLogs. @@ -22,18 +21,18 @@ import ( // Must use NewResourceLogsSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceLogsSlice struct { - orig *[]*otlplogs.ResourceLogs + orig *[]*internal.ResourceLogs state *internal.State } -func newResourceLogsSlice(orig *[]*otlplogs.ResourceLogs, state *internal.State) ResourceLogsSlice { +func newResourceLogsSlice(orig *[]*internal.ResourceLogs, state *internal.State) ResourceLogsSlice { return ResourceLogsSlice{orig: orig, state: state} } -// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements. +// NewResourceLogsSlice creates a ResourceLogsSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewResourceLogsSlice() ResourceLogsSlice { - orig := []*otlplogs.ResourceLogs(nil) + orig := []*internal.ResourceLogs(nil) return newResourceLogsSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ResourceLogsSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlplogs.ResourceLogs, len(*es.orig), newCap) + newOrig := make([]*internal.ResourceLogs, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ResourceLogsSlice) EnsureCapacity(newCap int) { // It returns the newly added ResourceLogs. 
func (es ResourceLogsSlice) AppendEmpty() ResourceLogs { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigResourceLogs()) + *es.orig = append(*es.orig, internal.NewResourceLogs()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigResourceLogs((*es.orig)[i], true) + internal.DeleteResourceLogs((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigResourceLogsSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyResourceLogsPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ResourceLogs elements within ResourceLogsSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go index b6ae0a100fb..9cb55863ad2 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go @@ -8,7 +8,6 @@ package plog import ( "go.opentelemetry.io/collector/pdata/internal" - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewScopeLogs function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeLogs struct { - orig *otlplogs.ScopeLogs + orig *internal.ScopeLogs state *internal.State } -func newScopeLogs(orig *otlplogs.ScopeLogs, state *internal.State) ScopeLogs { +func newScopeLogs(orig *internal.ScopeLogs, state *internal.State) ScopeLogs { return ScopeLogs{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newScopeLogs(orig *otlplogs.ScopeLogs, state *internal.State) ScopeLogs { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewScopeLogs() ScopeLogs { - return newScopeLogs(internal.NewOrigScopeLogs(), internal.NewState()) + return newScopeLogs(internal.NewScopeLogs(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms ScopeLogs) MoveTo(dest ScopeLogs) { if ms.orig == dest.orig { return } - internal.DeleteOrigScopeLogs(dest.orig, false) + internal.DeleteScopeLogs(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Scope returns the scope associated with this ScopeLogs. func (ms ScopeLogs) Scope() pcommon.InstrumentationScope { - return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) + return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state)) } // LogRecords returns the LogRecords associated with this ScopeLogs. @@ -73,5 +72,5 @@ func (ms ScopeLogs) SetSchemaUrl(v string) { // CopyTo copies all properties from the current struct overriding the destination. 
func (ms ScopeLogs) CopyTo(dest ScopeLogs) { dest.state.AssertMutable() - internal.CopyOrigScopeLogs(dest.orig, ms.orig) + internal.CopyScopeLogs(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go index af4f01eb09b..50309c7df9e 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" ) // ScopeLogsSlice logically represents a slice of ScopeLogs. @@ -22,18 +21,18 @@ import ( // Must use NewScopeLogsSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeLogsSlice struct { - orig *[]*otlplogs.ScopeLogs + orig *[]*internal.ScopeLogs state *internal.State } -func newScopeLogsSlice(orig *[]*otlplogs.ScopeLogs, state *internal.State) ScopeLogsSlice { +func newScopeLogsSlice(orig *[]*internal.ScopeLogs, state *internal.State) ScopeLogsSlice { return ScopeLogsSlice{orig: orig, state: state} } -// NewScopeLogsSlice creates a ScopeLogsSlice with 0 elements. +// NewScopeLogsSlice creates a ScopeLogsSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewScopeLogsSlice() ScopeLogsSlice { - orig := []*otlplogs.ScopeLogs(nil) + orig := []*internal.ScopeLogs(nil) return newScopeLogsSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ScopeLogsSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlplogs.ScopeLogs, len(*es.orig), newCap) + newOrig := make([]*internal.ScopeLogs, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ScopeLogsSlice) EnsureCapacity(newCap int) { // It returns the newly added ScopeLogs. 
func (es ScopeLogsSlice) AppendEmpty() ScopeLogs { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigScopeLogs()) + *es.orig = append(*es.orig, internal.NewScopeLogs()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigScopeLogs((*es.orig)[i], true) + internal.DeleteScopeLogs((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ScopeLogsSlice) CopyTo(dest ScopeLogsSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigScopeLogsSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyScopeLogsPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ScopeLogs elements within ScopeLogsSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/json.go b/vendor/go.opentelemetry.io/collector/pdata/plog/json.go index 362ee3df3a7..7d7bdaae256 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/json.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/json.go @@ -6,7 +6,6 @@ package plog // import "go.opentelemetry.io/collector/pdata/plog" import ( "slices" - "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" ) @@ -18,7 +17,7 @@ type JSONMarshaler struct{} func (*JSONMarshaler) MarshalLogs(ld Logs) ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportLogsServiceRequest(ld.getOrig(), dest) + ld.getOrig().MarshalJSON(dest) if dest.Error() != nil { return nil, dest.Error() } @@ -35,7 +34,7 @@ func (*JSONUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) { iter := json.BorrowIterator(buf) defer json.ReturnIterator(iter) ld := NewLogs() - internal.UnmarshalJSONOrigExportLogsServiceRequest(ld.getOrig(), iter) + ld.getOrig().UnmarshalJSON(iter) if iter.Error() != nil { return Logs{}, iter.Error() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go b/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go index aad904c6d82..1f717928f93 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go @@ -3,35 +3,31 @@ package plog // import "go.opentelemetry.io/collector/pdata/plog" -import ( - "go.opentelemetry.io/collector/pdata/internal" -) - var _ MarshalSizer = (*ProtoMarshaler)(nil) type ProtoMarshaler struct{} func (e *ProtoMarshaler) MarshalLogs(ld Logs) ([]byte, error) { - size := internal.SizeProtoOrigExportLogsServiceRequest(ld.getOrig()) + size := ld.getOrig().SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportLogsServiceRequest(ld.getOrig(), buf) + _ = ld.getOrig().MarshalProto(buf) return buf, nil } func (e *ProtoMarshaler) LogsSize(ld Logs) int { - return internal.SizeProtoOrigExportLogsServiceRequest(ld.getOrig()) + return ld.getOrig().SizeProto() } func (e *ProtoMarshaler) ResourceLogsSize(ld ResourceLogs) int { - return internal.SizeProtoOrigResourceLogs(ld.orig) + return ld.orig.SizeProto() } func (e *ProtoMarshaler) ScopeLogsSize(ld ScopeLogs) int { - return internal.SizeProtoOrigScopeLogs(ld.orig) + return ld.orig.SizeProto() } func (e *ProtoMarshaler) LogRecordSize(ld LogRecord) int { - return internal.SizeProtoOrigLogRecord(ld.orig) + return ld.orig.SizeProto() } var _ Unmarshaler = (*ProtoUnmarshaler)(nil) @@ -40,7 +36,7 @@ type ProtoUnmarshaler struct{} func (d *ProtoUnmarshaler) 
UnmarshalLogs(buf []byte) (Logs, error) { ld := NewLogs() - err := internal.UnmarshalProtoOrigExportLogsServiceRequest(ld.getOrig(), buf) + err := ld.getOrig().UnmarshalProto(buf) if err != nil { return Logs{}, err } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportpartialsuccess.go index b92bd222beb..e465eb01e5d 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportpartialsuccess.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportpartialsuccess.go @@ -8,7 +8,6 @@ package plogotlp import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" ) // ExportPartialSuccess represents the details of a partially successful export request. @@ -19,11 +18,11 @@ import ( // Must use NewExportPartialSuccess function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportPartialSuccess struct { - orig *otlpcollectorlogs.ExportLogsPartialSuccess + orig *internal.ExportLogsPartialSuccess state *internal.State } -func newExportPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, state *internal.State) ExportPartialSuccess { +func newExportPartialSuccess(orig *internal.ExportLogsPartialSuccess, state *internal.State) ExportPartialSuccess { return ExportPartialSuccess{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newExportPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, s // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportPartialSuccess() ExportPartialSuccess { - return newExportPartialSuccess(internal.NewOrigExportLogsPartialSuccess(), internal.NewState()) + return newExportPartialSuccess(internal.NewExportLogsPartialSuccess(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { if ms.orig == dest.orig { return } - internal.DeleteOrigExportLogsPartialSuccess(dest.orig, false) + internal.DeleteExportLogsPartialSuccess(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -73,5 +72,5 @@ func (ms ExportPartialSuccess) SetErrorMessage(v string) { // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { dest.state.AssertMutable() - internal.CopyOrigExportLogsPartialSuccess(dest.orig, ms.orig) + internal.CopyExportLogsPartialSuccess(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportresponse.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportresponse.go index 7506faba692..09145a747c7 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportresponse.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportresponse.go @@ -8,7 +8,6 @@ package plogotlp import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" ) // ExportResponse represents the response for gRPC/HTTP client/server. 
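The json.go and pb.go hunks above move the marshaling work onto methods of the internal request type (MarshalJSON, MarshalProto, SizeProto), but the exported marshaler types keep their signatures, so callers are unaffected. A round-trip sketch using the public surface:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty()

	var pm plog.ProtoMarshaler
	buf, err := pm.MarshalLogs(ld) // buffer is pre-sized via SizeProto, as in the hunk above
	if err != nil {
		panic(err)
	}
	fmt.Println("proto bytes:", len(buf), "==", pm.LogsSize(ld))

	var pu plog.ProtoUnmarshaler
	back, err := pu.UnmarshalLogs(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-tripped resource logs:", back.ResourceLogs().Len())
}
```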
@@ -19,11 +18,11 @@ import ( // Must use NewExportResponse function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportResponse struct { - orig *otlpcollectorlogs.ExportLogsServiceResponse + orig *internal.ExportLogsServiceResponse state *internal.State } -func newExportResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, state *internal.State) ExportResponse { +func newExportResponse(orig *internal.ExportLogsServiceResponse, state *internal.State) ExportResponse { return ExportResponse{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newExportResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, state // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportResponse() ExportResponse { - return newExportResponse(internal.NewOrigExportLogsServiceResponse(), internal.NewState()) + return newExportResponse(internal.NewExportLogsServiceResponse(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ExportResponse) MoveTo(dest ExportResponse) { if ms.orig == dest.orig { return } - internal.DeleteOrigExportLogsServiceResponse(dest.orig, false) + internal.DeleteExportLogsServiceResponse(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -56,5 +55,5 @@ func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportResponse) CopyTo(dest ExportResponse) { dest.state.AssertMutable() - internal.CopyOrigExportLogsServiceResponse(dest.orig, ms.orig) + internal.CopyExportLogsServiceResponse(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/grpc.go index 6a892606d88..77cc3b80609 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/grpc.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/grpc.go @@ -11,8 +11,7 @@ import ( "google.golang.org/grpc/status" "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" - _ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // enforces custom gRPC encoding to be loaded. + "go.opentelemetry.io/collector/pdata/internal/otelgrpc" "go.opentelemetry.io/collector/pdata/internal/otlp" ) @@ -32,11 +31,11 @@ type GRPCClient interface { // NewGRPCClient returns a new GRPCClient connected using the given connection. func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { - return &grpcClient{rawClient: otlpcollectorlog.NewLogsServiceClient(cc)} + return &grpcClient{rawClient: otelgrpc.NewLogsServiceClient(cc)} } type grpcClient struct { - rawClient otlpcollectorlog.LogsServiceClient + rawClient otelgrpc.LogsServiceClient } func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { @@ -75,14 +74,14 @@ func (*UnimplementedGRPCServer) unexported() {} // RegisterGRPCServer registers the Server to the grpc.Server. 
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { - otlpcollectorlog.RegisterLogsServiceServer(s, &rawLogsServer{srv: srv}) + otelgrpc.RegisterLogsServiceServer(s, &rawLogsServer{srv: srv}) } type rawLogsServer struct { srv GRPCServer } -func (s rawLogsServer) Export(ctx context.Context, request *otlpcollectorlog.ExportLogsServiceRequest) (*otlpcollectorlog.ExportLogsServiceResponse, error) { +func (s rawLogsServer) Export(ctx context.Context, request *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error) { otlp.MigrateLogs(request.ResourceLogs) rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()}) return rsp.orig, err diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/request.go index 57b8743f4b8..a25f873e8a5 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/request.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/request.go @@ -7,7 +7,6 @@ import ( "slices" "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" "go.opentelemetry.io/collector/pdata/plog" @@ -16,14 +15,14 @@ import ( // ExportRequest represents the request for gRPC/HTTP client/server. // It's a wrapper for plog.Logs data. type ExportRequest struct { - orig *otlpcollectorlog.ExportLogsServiceRequest + orig *internal.ExportLogsServiceRequest state *internal.State } // NewExportRequest returns an empty ExportRequest. func NewExportRequest() ExportRequest { return ExportRequest{ - orig: &otlpcollectorlog.ExportLogsServiceRequest{}, + orig: &internal.ExportLogsServiceRequest{}, state: internal.NewState(), } } @@ -33,22 +32,22 @@ func NewExportRequest() ExportRequest { // any changes to the provided Logs struct will be reflected in the ExportRequest and vice versa. func NewExportRequestFromLogs(ld plog.Logs) ExportRequest { return ExportRequest{ - orig: internal.GetOrigLogs(internal.Logs(ld)), - state: internal.GetLogsState(internal.Logs(ld)), + orig: internal.GetLogsOrig(internal.LogsWrapper(ld)), + state: internal.GetLogsState(internal.LogsWrapper(ld)), } } // MarshalProto marshals ExportRequest into proto bytes. func (ms ExportRequest) MarshalProto() ([]byte, error) { - size := internal.SizeProtoOrigExportLogsServiceRequest(ms.orig) + size := ms.orig.SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportLogsServiceRequest(ms.orig, buf) + _ = ms.orig.MarshalProto(buf) return buf, nil } // UnmarshalProto unmarshalls ExportRequest from proto bytes. 
func (ms ExportRequest) UnmarshalProto(data []byte) error { - err := internal.UnmarshalProtoOrigExportLogsServiceRequest(ms.orig, data) + err := ms.orig.UnmarshalProto(data) if err != nil { return err } @@ -60,7 +59,7 @@ func (ms ExportRequest) UnmarshalProto(data []byte) error { func (ms ExportRequest) MarshalJSON() ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportLogsServiceRequest(ms.orig, dest) + ms.orig.MarshalJSON(dest) if dest.Error() != nil { return nil, dest.Error() } @@ -71,10 +70,10 @@ func (ms ExportRequest) MarshalJSON() ([]byte, error) { func (ms ExportRequest) UnmarshalJSON(data []byte) error { iter := json.BorrowIterator(data) defer json.ReturnIterator(iter) - internal.UnmarshalJSONOrigExportLogsServiceRequest(ms.orig, iter) + ms.orig.UnmarshalJSON(iter) return iter.Error() } func (ms ExportRequest) Logs() plog.Logs { - return plog.Logs(internal.NewLogs(ms.orig, ms.state)) + return plog.Logs(internal.NewLogsWrapper(ms.orig, ms.state)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/response.go index 5a997ac14d7..8a332ba4e6f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/response.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/response.go @@ -6,28 +6,27 @@ package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp" import ( "slices" - "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/json" ) // MarshalProto marshals ExportResponse into proto bytes. func (ms ExportResponse) MarshalProto() ([]byte, error) { - size := internal.SizeProtoOrigExportLogsServiceResponse(ms.orig) + size := ms.orig.SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportLogsServiceResponse(ms.orig, buf) + _ = ms.orig.MarshalProto(buf) return buf, nil } // UnmarshalProto unmarshalls ExportResponse from proto bytes. func (ms ExportResponse) UnmarshalProto(data []byte) error { - return internal.UnmarshalProtoOrigExportLogsServiceResponse(ms.orig, data) + return ms.orig.UnmarshalProto(data) } // MarshalJSON marshals ExportResponse into JSON bytes. func (ms ExportResponse) MarshalJSON() ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportLogsServiceResponse(ms.orig, dest) + ms.orig.MarshalJSON(dest) return slices.Clone(dest.Buffer()), dest.Error() } @@ -35,6 +34,6 @@ func (ms ExportResponse) MarshalJSON() ([]byte, error) { func (ms ExportResponse) UnmarshalJSON(data []byte) error { iter := json.BorrowIterator(data) defer json.ReturnIterator(iter) - internal.UnmarshalJSONOrigExportLogsServiceResponse(ms.orig, iter) + ms.orig.UnmarshalJSON(iter) return iter.Error() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go b/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go index 53a9d4179c4..5df0690872b 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go @@ -4,38 +4,38 @@ package plog // import "go.opentelemetry.io/collector/pdata/plog" import ( - otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/internal" ) // SeverityNumber represents severity number of a log record. 
type SeverityNumber int32 const ( - SeverityNumberUnspecified = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED) - SeverityNumberTrace = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE) - SeverityNumberTrace2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE2) - SeverityNumberTrace3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE3) - SeverityNumberTrace4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE4) - SeverityNumberDebug = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG) - SeverityNumberDebug2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG2) - SeverityNumberDebug3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG3) - SeverityNumberDebug4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG4) - SeverityNumberInfo = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO) - SeverityNumberInfo2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO2) - SeverityNumberInfo3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO3) - SeverityNumberInfo4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO4) - SeverityNumberWarn = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN) - SeverityNumberWarn2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN2) - SeverityNumberWarn3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN3) - SeverityNumberWarn4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN4) - SeverityNumberError = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR) - SeverityNumberError2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR2) - SeverityNumberError3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR3) - SeverityNumberError4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR4) - SeverityNumberFatal = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL) - SeverityNumberFatal2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL2) - SeverityNumberFatal3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL3) - SeverityNumberFatal4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL4) + SeverityNumberUnspecified = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED) + SeverityNumberTrace = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE) + SeverityNumberTrace2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE2) + SeverityNumberTrace3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE3) + SeverityNumberTrace4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE4) + SeverityNumberDebug = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG) + SeverityNumberDebug2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG2) + SeverityNumberDebug3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG3) + SeverityNumberDebug4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG4) + SeverityNumberInfo = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO) + SeverityNumberInfo2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO2) + SeverityNumberInfo3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO3) + SeverityNumberInfo4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO4) + SeverityNumberWarn = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN) + SeverityNumberWarn2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN2) + SeverityNumberWarn3 = 
SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN3) + SeverityNumberWarn4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN4) + SeverityNumberError = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR) + SeverityNumberError2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR2) + SeverityNumberError3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR3) + SeverityNumberError4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR4) + SeverityNumberFatal = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL) + SeverityNumberFatal2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL2) + SeverityNumberFatal3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL3) + SeverityNumberFatal4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL4) ) // String returns the string representation of the SeverityNumber. diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go index 0ba2b28c29b..f2abb4f62b8 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go @@ -4,7 +4,7 @@ package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" import ( - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/internal" ) // AggregationTemporality defines how a metric aggregator reports aggregated values. @@ -13,11 +13,11 @@ type AggregationTemporality int32 const ( // AggregationTemporalityUnspecified is the default AggregationTemporality, it MUST NOT be used. - AggregationTemporalityUnspecified = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED) + AggregationTemporalityUnspecified = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED) // AggregationTemporalityDelta is a AggregationTemporality for a metric aggregator which reports changes since last report time. - AggregationTemporalityDelta = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA) + AggregationTemporalityDelta = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA) // AggregationTemporalityCumulative is a AggregationTemporality for a metric aggregator which reports changes since a fixed start time. - AggregationTemporalityCumulative = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE) + AggregationTemporalityCumulative = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE) ) // String returns the string representation of the AggregationTemporality. 
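The plogotlp hunks earlier in this section replace the generated service client with the internal otelgrpc one without changing the exported client surface. A sketch of exporting logs over OTLP/gRPC through that surface; the endpoint and credentials are placeholders, and grpc.NewClient assumes a recent grpc-go (older versions would use grpc.Dial):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
)

func main() {
	conn, err := grpc.NewClient("localhost:4317",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty()

	client := plogotlp.NewGRPCClient(conn)
	req := plogotlp.NewExportRequestFromLogs(ld) // shares the underlying data, no copy
	resp, err := client.Export(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("rejected records:", resp.PartialSuccess().RejectedLogRecords())
}
```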
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go index a827a4214c1..12c9984abc6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go @@ -8,8 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - "go.opentelemetry.io/collector/pdata/internal/data" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -24,11 +22,11 @@ import ( // Must use NewExemplar function to create new instances. // Important: zero-initialized instance is not valid for use. type Exemplar struct { - orig *otlpmetrics.Exemplar + orig *internal.Exemplar state *internal.State } -func newExemplar(orig *otlpmetrics.Exemplar, state *internal.State) Exemplar { +func newExemplar(orig *internal.Exemplar, state *internal.State) Exemplar { return Exemplar{orig: orig, state: state} } @@ -37,7 +35,7 @@ func newExemplar(orig *otlpmetrics.Exemplar, state *internal.State) Exemplar { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExemplar() Exemplar { - return newExemplar(internal.NewOrigExemplar(), internal.NewState()) + return newExemplar(internal.NewExemplar(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -49,13 +47,13 @@ func (ms Exemplar) MoveTo(dest Exemplar) { if ms.orig == dest.orig { return } - internal.DeleteOrigExemplar(dest.orig, false) + internal.DeleteExemplar(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // FilteredAttributes returns the FilteredAttributes associated with this Exemplar. func (ms Exemplar) FilteredAttributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.FilteredAttributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.FilteredAttributes, ms.state)) } // Timestamp returns the timestamp associated with this Exemplar. @@ -73,9 +71,9 @@ func (ms Exemplar) SetTimestamp(v pcommon.Timestamp) { // Calling this function on zero-initialized Exemplar will cause a panic. func (ms Exemplar) ValueType() ExemplarValueType { switch ms.orig.Value.(type) { - case *otlpmetrics.Exemplar_AsDouble: + case *internal.Exemplar_AsDouble: return ExemplarValueTypeDouble - case *otlpmetrics.Exemplar_AsInt: + case *internal.Exemplar_AsInt: return ExemplarValueTypeInt } return ExemplarValueTypeEmpty @@ -89,17 +87,17 @@ func (ms Exemplar) DoubleValue() float64 { // SetDoubleValue replaces the double associated with this Exemplar. func (ms Exemplar) SetDoubleValue(v float64) { ms.state.AssertMutable() - var ov *otlpmetrics.Exemplar_AsDouble + var ov *internal.Exemplar_AsDouble if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Exemplar_AsDouble{} + ov = &internal.Exemplar_AsDouble{} } else { - ov = internal.ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble) + ov = internal.ProtoPoolExemplar_AsDouble.Get().(*internal.Exemplar_AsDouble) } ov.AsDouble = v ms.orig.Value = ov } // IntValue returns the int associated with this Exemplar.
func (ms Exemplar) IntValue() int64 { return ms.orig.GetAsInt() } @@ -107,27 +103,16 @@ func (ms Exemplar) IntValue() int64 { // SetIntValue replaces the int associated with this Exemplar. func (ms Exemplar) SetIntValue(v int64) { ms.state.AssertMutable() - var ov *otlpmetrics.Exemplar_AsInt + var ov *internal.Exemplar_AsInt if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Exemplar_AsInt{} + ov = &internal.Exemplar_AsInt{} } else { - ov = internal.ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt) + ov = internal.ProtoPoolExemplar_AsInt.Get().(*internal.Exemplar_AsInt) } ov.AsInt = v ms.orig.Value = ov } -// SpanID returns the spanid associated with this Exemplar. -func (ms Exemplar) SpanID() pcommon.SpanID { - return pcommon.SpanID(ms.orig.SpanId) -} - -// SetSpanID replaces the spanid associated with this Exemplar. -func (ms Exemplar) SetSpanID(v pcommon.SpanID) { - ms.state.AssertMutable() - ms.orig.SpanId = data.SpanID(v) -} - // TraceID returns the traceid associated with this Exemplar. func (ms Exemplar) TraceID() pcommon.TraceID { return pcommon.TraceID(ms.orig.TraceId) @@ -136,11 +121,22 @@ func (ms Exemplar) TraceID() pcommon.TraceID { // SetTraceID replaces the traceid associated with this Exemplar. func (ms Exemplar) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() - ms.orig.TraceId = data.TraceID(v) + ms.orig.TraceId = internal.TraceID(v) +} + +// SpanID returns the spanid associated with this Exemplar. +func (ms Exemplar) SpanID() pcommon.SpanID { + return pcommon.SpanID(ms.orig.SpanId) +} + +// SetSpanID replaces the spanid associated with this Exemplar. +func (ms Exemplar) SetSpanID(v pcommon.SpanID) { + ms.state.AssertMutable() + ms.orig.SpanId = internal.SpanID(v) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Exemplar) CopyTo(dest Exemplar) { dest.state.AssertMutable() - internal.CopyOrigExemplar(dest.orig, ms.orig) + internal.CopyExemplar(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go index 24f54d61d1b..d6fb809cf7a 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go @@ -10,7 +10,6 @@ import ( "iter" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ExemplarSlice logically represents a slice of Exemplar. @@ -21,18 +20,18 @@ import ( // Must use NewExemplarSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ExemplarSlice struct { - orig *[]otlpmetrics.Exemplar + orig *[]internal.Exemplar state *internal.State } -func newExemplarSlice(orig *[]otlpmetrics.Exemplar, state *internal.State) ExemplarSlice { +func newExemplarSlice(orig *[]internal.Exemplar, state *internal.State) ExemplarSlice { return ExemplarSlice{orig: orig, state: state} } -// NewExemplarSlice creates a ExemplarSlice with 0 elements. +// NewExemplarSlice creates a ExemplarSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. 
func NewExemplarSlice() ExemplarSlice { - orig := []otlpmetrics.Exemplar(nil) + orig := []internal.Exemplar(nil) return newExemplarSlice(&orig, internal.NewState()) } @@ -89,7 +88,7 @@ func (es ExemplarSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]otlpmetrics.Exemplar, len(*es.orig), newCap) + newOrig := make([]internal.Exemplar, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -98,7 +97,7 @@ func (es ExemplarSlice) EnsureCapacity(newCap int) { // It returns the newly added Exemplar. func (es ExemplarSlice) AppendEmpty() Exemplar { es.state.AssertMutable() - *es.orig = append(*es.orig, otlpmetrics.Exemplar{}) + *es.orig = append(*es.orig, internal.Exemplar{}) return es.At(es.Len() - 1) } @@ -127,7 +126,7 @@ func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigExemplar(&(*es.orig)[i], false) + internal.DeleteExemplar(&(*es.orig)[i], false) continue } if newLen == i { @@ -148,5 +147,5 @@ func (es ExemplarSlice) CopyTo(dest ExemplarSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigExemplarSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyExemplarSlice(*dest.orig, *es.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go index 660360a9747..9970cb5878c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ExponentialHistogram represents the type of a metric that is calculated by aggregating @@ -20,11 +19,11 @@ import ( // Must use NewExponentialHistogram function to create new instances. // Important: zero-initialized instance is not valid for use. type ExponentialHistogram struct { - orig *otlpmetrics.ExponentialHistogram + orig *internal.ExponentialHistogram state *internal.State } -func newExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, state *internal.State) ExponentialHistogram { +func newExponentialHistogram(orig *internal.ExponentialHistogram, state *internal.State) ExponentialHistogram { return ExponentialHistogram{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, state *inte // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewExponentialHistogram() ExponentialHistogram { - return newExponentialHistogram(internal.NewOrigExponentialHistogram(), internal.NewState()) + return newExponentialHistogram(internal.NewExponentialHistogram(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,7 +44,7 @@ func (ms ExponentialHistogram) MoveTo(dest ExponentialHistogram) { if ms.orig == dest.orig { return } - internal.DeleteOrigExponentialHistogram(dest.orig, false) + internal.DeleteExponentialHistogram(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -62,11 +61,11 @@ func (ms ExponentialHistogram) AggregationTemporality() AggregationTemporality { // SetAggregationTemporality replaces the aggregationtemporality associated with this ExponentialHistogram. func (ms ExponentialHistogram) SetAggregationTemporality(v AggregationTemporality) { ms.state.AssertMutable() - ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) + ms.orig.AggregationTemporality = internal.AggregationTemporality(v) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExponentialHistogram) CopyTo(dest ExponentialHistogram) { dest.state.AssertMutable() - internal.CopyOrigExponentialHistogram(dest.orig, ms.orig) + internal.CopyExponentialHistogram(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go index 96de0fa493c..d21bb141025 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -23,11 +22,11 @@ import ( // Must use NewExponentialHistogramDataPoint function to create new instances. // Important: zero-initialized instance is not valid for use. type ExponentialHistogramDataPoint struct { - orig *otlpmetrics.ExponentialHistogramDataPoint + orig *internal.ExponentialHistogramDataPoint state *internal.State } -func newExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPoint { +func newExponentialHistogramDataPoint(orig *internal.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPoint { return ExponentialHistogramDataPoint{orig: orig, state: state} } @@ -36,7 +35,7 @@ func newExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramData // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewExponentialHistogramDataPoint() ExponentialHistogramDataPoint { - return newExponentialHistogramDataPoint(internal.NewOrigExponentialHistogramDataPoint(), internal.NewState()) + return newExponentialHistogramDataPoint(internal.NewExponentialHistogramDataPoint(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -48,13 +47,13 @@ func (ms ExponentialHistogramDataPoint) MoveTo(dest ExponentialHistogramDataPoin if ms.orig == dest.orig { return } - internal.DeleteOrigExponentialHistogramDataPoint(dest.orig, false) + internal.DeleteExponentialHistogramDataPoint(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Attributes returns the Attributes associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Attributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state)) } // StartTimestamp returns the starttimestamp associated with this ExponentialHistogramDataPoint. @@ -92,25 +91,25 @@ func (ms ExponentialHistogramDataPoint) SetCount(v uint64) { // Sum returns the sum associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Sum() float64 { - return ms.orig.GetSum() + return ms.orig.Sum } // HasSum returns true if the ExponentialHistogramDataPoint contains a // Sum value otherwise. func (ms ExponentialHistogramDataPoint) HasSum() bool { - return ms.orig.Sum_ != nil + return ms.orig.HasSum() } // SetSum replaces the sum associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetSum(v float64) { ms.state.AssertMutable() - ms.orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: v} + ms.orig.SetSum(v) } // RemoveSum removes the sum associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) RemoveSum() { ms.state.AssertMutable() - ms.orig.Sum_ = nil + ms.orig.RemoveSum() } // Scale returns the scale associated with this ExponentialHistogramDataPoint. @@ -163,48 +162,48 @@ func (ms ExponentialHistogramDataPoint) Exemplars() ExemplarSlice { // Min returns the min associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Min() float64 { - return ms.orig.GetMin() + return ms.orig.Min } // HasMin returns true if the ExponentialHistogramDataPoint contains a // Min value otherwise. func (ms ExponentialHistogramDataPoint) HasMin() bool { - return ms.orig.Min_ != nil + return ms.orig.HasMin() } // SetMin replaces the min associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetMin(v float64) { ms.state.AssertMutable() - ms.orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: v} + ms.orig.SetMin(v) } // RemoveMin removes the min associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) RemoveMin() { ms.state.AssertMutable() - ms.orig.Min_ = nil + ms.orig.RemoveMin() } // Max returns the max associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Max() float64 { - return ms.orig.GetMax() + return ms.orig.Max } // HasMax returns true if the ExponentialHistogramDataPoint contains a // Max value otherwise. func (ms ExponentialHistogramDataPoint) HasMax() bool { - return ms.orig.Max_ != nil + return ms.orig.HasMax() } // SetMax replaces the max associated with this ExponentialHistogramDataPoint. 
func (ms ExponentialHistogramDataPoint) SetMax(v float64) { ms.state.AssertMutable() - ms.orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: v} + ms.orig.SetMax(v) } // RemoveMax removes the max associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) RemoveMax() { ms.state.AssertMutable() - ms.orig.Max_ = nil + ms.orig.RemoveMax() } // ZeroThreshold returns the zerothreshold associated with this ExponentialHistogramDataPoint. @@ -221,5 +220,5 @@ func (ms ExponentialHistogramDataPoint) SetZeroThreshold(v float64) { // CopyTo copies all properties from the current struct overriding the destination. func (ms ExponentialHistogramDataPoint) CopyTo(dest ExponentialHistogramDataPoint) { dest.state.AssertMutable() - internal.CopyOrigExponentialHistogramDataPoint(dest.orig, ms.orig) + internal.CopyExponentialHistogramDataPoint(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go index 54b6a6ee1b8..5864cd1a3d8 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewExponentialHistogramDataPointBuckets function to create new instances. // Important: zero-initialized instance is not valid for use. type ExponentialHistogramDataPointBuckets struct { - orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets + orig *internal.ExponentialHistogramDataPointBuckets state *internal.State } -func newExponentialHistogramDataPointBuckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, state *internal.State) ExponentialHistogramDataPointBuckets { +func newExponentialHistogramDataPointBuckets(orig *internal.ExponentialHistogramDataPointBuckets, state *internal.State) ExponentialHistogramDataPointBuckets { return ExponentialHistogramDataPointBuckets{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newExponentialHistogramDataPointBuckets(orig *otlpmetrics.ExponentialHistog // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewExponentialHistogramDataPointBuckets() ExponentialHistogramDataPointBuckets { - return newExponentialHistogramDataPointBuckets(internal.NewOrigExponentialHistogramDataPoint_Buckets(), internal.NewState()) + return newExponentialHistogramDataPointBuckets(internal.NewExponentialHistogramDataPointBuckets(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,7 +44,7 @@ func (ms ExponentialHistogramDataPointBuckets) MoveTo(dest ExponentialHistogramD if ms.orig == dest.orig { return } - internal.DeleteOrigExponentialHistogramDataPoint_Buckets(dest.orig, false) + internal.DeleteExponentialHistogramDataPointBuckets(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -62,11 +61,11 @@ func (ms ExponentialHistogramDataPointBuckets) SetOffset(v int32) { // BucketCounts returns the BucketCounts associated with this ExponentialHistogramDataPointBuckets. func (ms ExponentialHistogramDataPointBuckets) BucketCounts() pcommon.UInt64Slice { - return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.BucketCounts, ms.state)) + return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.BucketCounts, ms.state)) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExponentialHistogramDataPointBuckets) CopyTo(dest ExponentialHistogramDataPointBuckets) { dest.state.AssertMutable() - internal.CopyOrigExponentialHistogramDataPoint_Buckets(dest.orig, ms.orig) + internal.CopyExponentialHistogramDataPointBuckets(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go index 819e783b356..c672d5cedaa 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ExponentialHistogramDataPointSlice logically represents a slice of ExponentialHistogramDataPoint. @@ -22,18 +21,18 @@ import ( // Must use NewExponentialHistogramDataPointSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ExponentialHistogramDataPointSlice struct { - orig *[]*otlpmetrics.ExponentialHistogramDataPoint + orig *[]*internal.ExponentialHistogramDataPoint state *internal.State } -func newExponentialHistogramDataPointSlice(orig *[]*otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPointSlice { +func newExponentialHistogramDataPointSlice(orig *[]*internal.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPointSlice { return ExponentialHistogramDataPointSlice{orig: orig, state: state} } -// NewExponentialHistogramDataPointSlice creates a ExponentialHistogramDataPointSlice with 0 elements. +// NewExponentialHistogramDataPointSlice creates a ExponentialHistogramDataPointSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. 
func NewExponentialHistogramDataPointSlice() ExponentialHistogramDataPointSlice { - orig := []*otlpmetrics.ExponentialHistogramDataPoint(nil) + orig := []*internal.ExponentialHistogramDataPoint(nil) return newExponentialHistogramDataPointSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ExponentialHistogramDataPointSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpmetrics.ExponentialHistogramDataPoint, len(*es.orig), newCap) + newOrig := make([]*internal.ExponentialHistogramDataPoint, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ExponentialHistogramDataPointSlice) EnsureCapacity(newCap int) { // It returns the newly added ExponentialHistogramDataPoint. func (es ExponentialHistogramDataPointSlice) AppendEmpty() ExponentialHistogramDataPoint { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigExponentialHistogramDataPoint()) + *es.orig = append(*es.orig, internal.NewExponentialHistogramDataPoint()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogra newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigExponentialHistogramDataPoint((*es.orig)[i], true) + internal.DeleteExponentialHistogramDataPoint((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ExponentialHistogramDataPointSlice) CopyTo(dest ExponentialHistogramDat if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigExponentialHistogramDataPointSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyExponentialHistogramDataPointPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ExponentialHistogramDataPoint elements within ExponentialHistogramDataPointSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go index 35ae4bf3d1e..c05f403a6bc 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // Gauge represents the type of a numeric metric that always exports the "current value" for every data point. @@ -19,11 +18,11 @@ import ( // Must use NewGauge function to create new instances. // Important: zero-initialized instance is not valid for use. type Gauge struct { - orig *otlpmetrics.Gauge + orig *internal.Gauge state *internal.State } -func newGauge(orig *otlpmetrics.Gauge, state *internal.State) Gauge { +func newGauge(orig *internal.Gauge, state *internal.State) Gauge { return Gauge{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newGauge(orig *otlpmetrics.Gauge, state *internal.State) Gauge { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewGauge() Gauge { - return newGauge(internal.NewOrigGauge(), internal.NewState()) + return newGauge(internal.NewGauge(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms Gauge) MoveTo(dest Gauge) { if ms.orig == dest.orig { return } - internal.DeleteOrigGauge(dest.orig, false) + internal.DeleteGauge(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -56,5 +55,5 @@ func (ms Gauge) DataPoints() NumberDataPointSlice { // CopyTo copies all properties from the current struct overriding the destination. func (ms Gauge) CopyTo(dest Gauge) { dest.state.AssertMutable() - internal.CopyOrigGauge(dest.orig, ms.orig) + internal.CopyGauge(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go index f96f70be290..924cd4f1d70 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval. @@ -19,11 +18,11 @@ import ( // Must use NewHistogram function to create new instances. // Important: zero-initialized instance is not valid for use. type Histogram struct { - orig *otlpmetrics.Histogram + orig *internal.Histogram state *internal.State } -func newHistogram(orig *otlpmetrics.Histogram, state *internal.State) Histogram { +func newHistogram(orig *internal.Histogram, state *internal.State) Histogram { return Histogram{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newHistogram(orig *otlpmetrics.Histogram, state *internal.State) Histogram // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewHistogram() Histogram { - return newHistogram(internal.NewOrigHistogram(), internal.NewState()) + return newHistogram(internal.NewHistogram(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms Histogram) MoveTo(dest Histogram) { if ms.orig == dest.orig { return } - internal.DeleteOrigHistogram(dest.orig, false) + internal.DeleteHistogram(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -61,11 +60,11 @@ func (ms Histogram) AggregationTemporality() AggregationTemporality { // SetAggregationTemporality replaces the aggregationtemporality associated with this Histogram. func (ms Histogram) SetAggregationTemporality(v AggregationTemporality) { ms.state.AssertMutable() - ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) + ms.orig.AggregationTemporality = internal.AggregationTemporality(v) } // CopyTo copies all properties from the current struct overriding the destination. 
func (ms Histogram) CopyTo(dest Histogram) { dest.state.AssertMutable() - internal.CopyOrigHistogram(dest.orig, ms.orig) + internal.CopyHistogram(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go index 3bbc2e18303..8426506ec66 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewHistogramDataPoint function to create new instances. // Important: zero-initialized instance is not valid for use. type HistogramDataPoint struct { - orig *otlpmetrics.HistogramDataPoint + orig *internal.HistogramDataPoint state *internal.State } -func newHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPoint { +func newHistogramDataPoint(orig *internal.HistogramDataPoint, state *internal.State) HistogramDataPoint { return HistogramDataPoint{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, state *internal // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewHistogramDataPoint() HistogramDataPoint { - return newHistogramDataPoint(internal.NewOrigHistogramDataPoint(), internal.NewState()) + return newHistogramDataPoint(internal.NewHistogramDataPoint(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms HistogramDataPoint) MoveTo(dest HistogramDataPoint) { if ms.orig == dest.orig { return } - internal.DeleteOrigHistogramDataPoint(dest.orig, false) + internal.DeleteHistogramDataPoint(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Attributes returns the Attributes associated with this HistogramDataPoint. func (ms HistogramDataPoint) Attributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state)) } // StartTimestamp returns the starttimestamp associated with this HistogramDataPoint. @@ -89,35 +88,35 @@ func (ms HistogramDataPoint) SetCount(v uint64) { // Sum returns the sum associated with this HistogramDataPoint. func (ms HistogramDataPoint) Sum() float64 { - return ms.orig.GetSum() + return ms.orig.Sum } // HasSum returns true if the HistogramDataPoint contains a // Sum value otherwise. func (ms HistogramDataPoint) HasSum() bool { - return ms.orig.Sum_ != nil + return ms.orig.HasSum() } // SetSum replaces the sum associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetSum(v float64) { ms.state.AssertMutable() - ms.orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: v} + ms.orig.SetSum(v) } // RemoveSum removes the sum associated with this HistogramDataPoint. func (ms HistogramDataPoint) RemoveSum() { ms.state.AssertMutable() - ms.orig.Sum_ = nil + ms.orig.RemoveSum() } // BucketCounts returns the BucketCounts associated with this HistogramDataPoint. 
func (ms HistogramDataPoint) BucketCounts() pcommon.UInt64Slice { - return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.BucketCounts, ms.state)) + return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.BucketCounts, ms.state)) } // ExplicitBounds returns the ExplicitBounds associated with this HistogramDataPoint. func (ms HistogramDataPoint) ExplicitBounds() pcommon.Float64Slice { - return pcommon.Float64Slice(internal.NewFloat64Slice(&ms.orig.ExplicitBounds, ms.state)) + return pcommon.Float64Slice(internal.NewFloat64SliceWrapper(&ms.orig.ExplicitBounds, ms.state)) } // Exemplars returns the Exemplars associated with this HistogramDataPoint. @@ -138,52 +137,52 @@ func (ms HistogramDataPoint) SetFlags(v DataPointFlags) { // Min returns the min associated with this HistogramDataPoint. func (ms HistogramDataPoint) Min() float64 { - return ms.orig.GetMin() + return ms.orig.Min } // HasMin returns true if the HistogramDataPoint contains a // Min value otherwise. func (ms HistogramDataPoint) HasMin() bool { - return ms.orig.Min_ != nil + return ms.orig.HasMin() } // SetMin replaces the min associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetMin(v float64) { ms.state.AssertMutable() - ms.orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: v} + ms.orig.SetMin(v) } // RemoveMin removes the min associated with this HistogramDataPoint. func (ms HistogramDataPoint) RemoveMin() { ms.state.AssertMutable() - ms.orig.Min_ = nil + ms.orig.RemoveMin() } // Max returns the max associated with this HistogramDataPoint. func (ms HistogramDataPoint) Max() float64 { - return ms.orig.GetMax() + return ms.orig.Max } // HasMax returns true if the HistogramDataPoint contains a // Max value otherwise. func (ms HistogramDataPoint) HasMax() bool { - return ms.orig.Max_ != nil + return ms.orig.HasMax() } // SetMax replaces the max associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetMax(v float64) { ms.state.AssertMutable() - ms.orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: v} + ms.orig.SetMax(v) } // RemoveMax removes the max associated with this HistogramDataPoint. func (ms HistogramDataPoint) RemoveMax() { ms.state.AssertMutable() - ms.orig.Max_ = nil + ms.orig.RemoveMax() } // CopyTo copies all properties from the current struct overriding the destination. func (ms HistogramDataPoint) CopyTo(dest HistogramDataPoint) { dest.state.AssertMutable() - internal.CopyOrigHistogramDataPoint(dest.orig, ms.orig) + internal.CopyHistogramDataPoint(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go index 72f77725454..296c372b433 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // HistogramDataPointSlice logically represents a slice of HistogramDataPoint. @@ -22,18 +21,18 @@ import ( // Must use NewHistogramDataPointSlice function to create new instances. // Important: zero-initialized instance is not valid for use. 
type HistogramDataPointSlice struct { - orig *[]*otlpmetrics.HistogramDataPoint + orig *[]*internal.HistogramDataPoint state *internal.State } -func newHistogramDataPointSlice(orig *[]*otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPointSlice { +func newHistogramDataPointSlice(orig *[]*internal.HistogramDataPoint, state *internal.State) HistogramDataPointSlice { return HistogramDataPointSlice{orig: orig, state: state} } -// NewHistogramDataPointSlice creates a HistogramDataPointSlice with 0 elements. +// NewHistogramDataPointSlice creates a HistogramDataPointSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewHistogramDataPointSlice() HistogramDataPointSlice { - orig := []*otlpmetrics.HistogramDataPoint(nil) + orig := []*internal.HistogramDataPoint(nil) return newHistogramDataPointSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es HistogramDataPointSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpmetrics.HistogramDataPoint, len(*es.orig), newCap) + newOrig := make([]*internal.HistogramDataPoint, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es HistogramDataPointSlice) EnsureCapacity(newCap int) { // It returns the newly added HistogramDataPoint. func (es HistogramDataPointSlice) AppendEmpty() HistogramDataPoint { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigHistogramDataPoint()) + *es.orig = append(*es.orig, internal.NewHistogramDataPoint()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigHistogramDataPoint((*es.orig)[i], true) + internal.DeleteHistogramDataPoint((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es HistogramDataPointSlice) CopyTo(dest HistogramDataPointSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigHistogramDataPointSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyHistogramDataPointPtrSlice(*dest.orig, *es.orig) } // Sort sorts the HistogramDataPoint elements within HistogramDataPointSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go index cd54c71fca1..792d38da8fd 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -21,11 +20,11 @@ import ( // Must use NewMetric function to create new instances. // Important: zero-initialized instance is not valid for use. type Metric struct { - orig *otlpmetrics.Metric + orig *internal.Metric state *internal.State } -func newMetric(orig *otlpmetrics.Metric, state *internal.State) Metric { +func newMetric(orig *internal.Metric, state *internal.State) Metric { return Metric{orig: orig, state: state} } @@ -34,7 +33,7 @@ func newMetric(orig *otlpmetrics.Metric, state *internal.State) Metric { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewMetric() Metric { - return newMetric(internal.NewOrigMetric(), internal.NewState()) + return newMetric(internal.NewMetric(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -46,7 +45,7 @@ func (ms Metric) MoveTo(dest Metric) { if ms.orig == dest.orig { return } - internal.DeleteOrigMetric(dest.orig, false) + internal.DeleteMetric(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -87,15 +86,15 @@ func (ms Metric) SetUnit(v string) { // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Type() MetricType { switch ms.orig.Data.(type) { - case *otlpmetrics.Metric_Gauge: + case *internal.Metric_Gauge: return MetricTypeGauge - case *otlpmetrics.Metric_Sum: + case *internal.Metric_Sum: return MetricTypeSum - case *otlpmetrics.Metric_Histogram: + case *internal.Metric_Histogram: return MetricTypeHistogram - case *otlpmetrics.Metric_ExponentialHistogram: + case *internal.Metric_ExponentialHistogram: return MetricTypeExponentialHistogram - case *otlpmetrics.Metric_Summary: + case *internal.Metric_Summary: return MetricTypeSummary } return MetricTypeEmpty @@ -108,7 +107,7 @@ func (ms Metric) Type() MetricType { // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Gauge() Gauge { - v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Gauge) + v, ok := ms.orig.GetData().(*internal.Metric_Gauge) if !ok { return Gauge{} } @@ -122,25 +121,22 @@ func (ms Metric) Gauge() Gauge { // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptyGauge() Gauge { ms.state.AssertMutable() - var ov *otlpmetrics.Metric_Gauge + var ov *internal.Metric_Gauge if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Gauge{} + ov = &internal.Metric_Gauge{} } else { - ov = internal.ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge) + ov = internal.ProtoPoolMetric_Gauge.Get().(*internal.Metric_Gauge) } - ov.Gauge = internal.NewOrigGauge() + ov.Gauge = internal.NewGauge() ms.orig.Data = ov return newGauge(ov.Gauge, ms.state) -} - -// Sum returns the sum associated with this Metric. -// +} // Sum returns the sum associated with this Metric. // Calling this function when Type() != MetricTypeSum returns an invalid // zero-initialized instance of Sum. Note that using such Sum instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Sum() Sum { - v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Sum) + v, ok := ms.orig.GetData().(*internal.Metric_Sum) if !ok { return Sum{} } @@ -154,25 +150,22 @@ func (ms Metric) Sum() Sum { // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptySum() Sum { ms.state.AssertMutable() - var ov *otlpmetrics.Metric_Sum + var ov *internal.Metric_Sum if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Sum{} + ov = &internal.Metric_Sum{} } else { - ov = internal.ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum) + ov = internal.ProtoPoolMetric_Sum.Get().(*internal.Metric_Sum) } - ov.Sum = internal.NewOrigSum() + ov.Sum = internal.NewSum() ms.orig.Data = ov return newSum(ov.Sum, ms.state) -} - -// Histogram returns the histogram associated with this Metric. -// +} // Histogram returns the histogram associated with this Metric. // Calling this function when Type() != MetricTypeHistogram returns an invalid // zero-initialized instance of Histogram. 
Note that using such Histogram instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Histogram() Histogram { - v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Histogram) + v, ok := ms.orig.GetData().(*internal.Metric_Histogram) if !ok { return Histogram{} } @@ -186,25 +179,22 @@ func (ms Metric) Histogram() Histogram { // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptyHistogram() Histogram { ms.state.AssertMutable() - var ov *otlpmetrics.Metric_Histogram + var ov *internal.Metric_Histogram if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Histogram{} + ov = &internal.Metric_Histogram{} } else { - ov = internal.ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram) + ov = internal.ProtoPoolMetric_Histogram.Get().(*internal.Metric_Histogram) } - ov.Histogram = internal.NewOrigHistogram() + ov.Histogram = internal.NewHistogram() ms.orig.Data = ov return newHistogram(ov.Histogram, ms.state) -} - -// ExponentialHistogram returns the exponentialhistogram associated with this Metric. -// +} // ExponentialHistogram returns the exponentialhistogram associated with this Metric. // Calling this function when Type() != MetricTypeExponentialHistogram returns an invalid // zero-initialized instance of ExponentialHistogram. Note that using such ExponentialHistogram instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) ExponentialHistogram() ExponentialHistogram { - v, ok := ms.orig.GetData().(*otlpmetrics.Metric_ExponentialHistogram) + v, ok := ms.orig.GetData().(*internal.Metric_ExponentialHistogram) if !ok { return ExponentialHistogram{} } @@ -218,25 +208,22 @@ func (ms Metric) ExponentialHistogram() ExponentialHistogram { // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptyExponentialHistogram() ExponentialHistogram { ms.state.AssertMutable() - var ov *otlpmetrics.Metric_ExponentialHistogram + var ov *internal.Metric_ExponentialHistogram if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_ExponentialHistogram{} + ov = &internal.Metric_ExponentialHistogram{} } else { - ov = internal.ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram) + ov = internal.ProtoPoolMetric_ExponentialHistogram.Get().(*internal.Metric_ExponentialHistogram) } - ov.ExponentialHistogram = internal.NewOrigExponentialHistogram() + ov.ExponentialHistogram = internal.NewExponentialHistogram() ms.orig.Data = ov return newExponentialHistogram(ov.ExponentialHistogram, ms.state) -} - -// Summary returns the summary associated with this Metric. -// +} // Summary returns the summary associated with this Metric. // Calling this function when Type() != MetricTypeSummary returns an invalid // zero-initialized instance of Summary. Note that using such Summary instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Summary() Summary { - v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Summary) + v, ok := ms.orig.GetData().(*internal.Metric_Summary) if !ok { return Summary{} } @@ -250,24 +237,24 @@ func (ms Metric) Summary() Summary { // Calling this function on zero-initialized Metric will cause a panic. 
func (ms Metric) SetEmptySummary() Summary { ms.state.AssertMutable() - var ov *otlpmetrics.Metric_Summary + var ov *internal.Metric_Summary if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.Metric_Summary{} + ov = &internal.Metric_Summary{} } else { - ov = internal.ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary) + ov = internal.ProtoPoolMetric_Summary.Get().(*internal.Metric_Summary) } - ov.Summary = internal.NewOrigSummary() + ov.Summary = internal.NewSummary() ms.orig.Data = ov return newSummary(ov.Summary, ms.state) } // Metadata returns the Metadata associated with this Metric. func (ms Metric) Metadata() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Metadata, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Metadata, ms.state)) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Metric) CopyTo(dest Metric) { dest.state.AssertMutable() - internal.CopyOrigMetric(dest.orig, ms.orig) + internal.CopyMetric(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metrics.go index 09c6600ccd7..9d873d5ee0b 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metrics.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metrics.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" ) // Metrics is the top-level struct that is propagated through the metrics pipeline. @@ -19,10 +18,10 @@ import ( // // Must use NewMetrics function to create new instances. // Important: zero-initialized instance is not valid for use. -type Metrics internal.Metrics +type Metrics internal.MetricsWrapper -func newMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest, state *internal.State) Metrics { - return Metrics(internal.NewMetrics(orig, state)) +func newMetrics(orig *internal.ExportMetricsServiceRequest, state *internal.State) Metrics { + return Metrics(internal.NewMetricsWrapper(orig, state)) } // NewMetrics creates a new empty Metrics. @@ -30,7 +29,7 @@ func newMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest, state *i // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewMetrics() Metrics { - return newMetrics(internal.NewOrigExportMetricsServiceRequest(), internal.NewState()) + return newMetrics(internal.NewExportMetricsServiceRequest(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -42,7 +41,7 @@ func (ms Metrics) MoveTo(dest Metrics) { if ms.getOrig() == dest.getOrig() { return } - internal.DeleteOrigExportMetricsServiceRequest(dest.getOrig(), false) + internal.DeleteExportMetricsServiceRequest(dest.getOrig(), false) *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig() } @@ -54,13 +53,13 @@ func (ms Metrics) ResourceMetrics() ResourceMetricsSlice { // CopyTo copies all properties from the current struct overriding the destination. 
func (ms Metrics) CopyTo(dest Metrics) { dest.getState().AssertMutable() - internal.CopyOrigExportMetricsServiceRequest(dest.getOrig(), ms.getOrig()) + internal.CopyExportMetricsServiceRequest(dest.getOrig(), ms.getOrig()) } -func (ms Metrics) getOrig() *otlpcollectormetrics.ExportMetricsServiceRequest { - return internal.GetOrigMetrics(internal.Metrics(ms)) +func (ms Metrics) getOrig() *internal.ExportMetricsServiceRequest { + return internal.GetMetricsOrig(internal.MetricsWrapper(ms)) } func (ms Metrics) getState() *internal.State { - return internal.GetMetricsState(internal.Metrics(ms)) + return internal.GetMetricsState(internal.MetricsWrapper(ms)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go index c74e7a451f5..3c8d26481e8 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // MetricSlice logically represents a slice of Metric. @@ -22,18 +21,18 @@ import ( // Must use NewMetricSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type MetricSlice struct { - orig *[]*otlpmetrics.Metric + orig *[]*internal.Metric state *internal.State } -func newMetricSlice(orig *[]*otlpmetrics.Metric, state *internal.State) MetricSlice { +func newMetricSlice(orig *[]*internal.Metric, state *internal.State) MetricSlice { return MetricSlice{orig: orig, state: state} } -// NewMetricSlice creates a MetricSlice with 0 elements. +// NewMetricSlice creates a MetricSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewMetricSlice() MetricSlice { - orig := []*otlpmetrics.Metric(nil) + orig := []*internal.Metric(nil) return newMetricSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es MetricSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpmetrics.Metric, len(*es.orig), newCap) + newOrig := make([]*internal.Metric, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es MetricSlice) EnsureCapacity(newCap int) { // It returns the newly added Metric. 
func (es MetricSlice) AppendEmpty() Metric { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigMetric()) + *es.orig = append(*es.orig, internal.NewMetric()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es MetricSlice) RemoveIf(f func(Metric) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigMetric((*es.orig)[i], true) + internal.DeleteMetric((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es MetricSlice) CopyTo(dest MetricSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigMetricSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyMetricPtrSlice(*dest.orig, *es.orig) } // Sort sorts the Metric elements within MetricSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go index 52a441ee8ba..8dc926bbef4 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewNumberDataPoint function to create new instances. // Important: zero-initialized instance is not valid for use. type NumberDataPoint struct { - orig *otlpmetrics.NumberDataPoint + orig *internal.NumberDataPoint state *internal.State } -func newNumberDataPoint(orig *otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPoint { +func newNumberDataPoint(orig *internal.NumberDataPoint, state *internal.State) NumberDataPoint { return NumberDataPoint{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newNumberDataPoint(orig *otlpmetrics.NumberDataPoint, state *internal.State // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewNumberDataPoint() NumberDataPoint { - return newNumberDataPoint(internal.NewOrigNumberDataPoint(), internal.NewState()) + return newNumberDataPoint(internal.NewNumberDataPoint(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms NumberDataPoint) MoveTo(dest NumberDataPoint) { if ms.orig == dest.orig { return } - internal.DeleteOrigNumberDataPoint(dest.orig, false) + internal.DeleteNumberDataPoint(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Attributes returns the Attributes associated with this NumberDataPoint. func (ms NumberDataPoint) Attributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state)) } // StartTimestamp returns the starttimestamp associated with this NumberDataPoint. @@ -80,9 +79,9 @@ func (ms NumberDataPoint) SetTimestamp(v pcommon.Timestamp) { // Calling this function on zero-initialized NumberDataPoint will cause a panic. 
func (ms NumberDataPoint) ValueType() NumberDataPointValueType { switch ms.orig.Value.(type) { - case *otlpmetrics.NumberDataPoint_AsDouble: + case *internal.NumberDataPoint_AsDouble: return NumberDataPointValueTypeDouble - case *otlpmetrics.NumberDataPoint_AsInt: + case *internal.NumberDataPoint_AsInt: return NumberDataPointValueTypeInt } return NumberDataPointValueTypeEmpty @@ -96,17 +95,15 @@ func (ms NumberDataPoint) DoubleValue() float64 { // SetDoubleValue replaces the double associated with this NumberDataPoint. func (ms NumberDataPoint) SetDoubleValue(v float64) { ms.state.AssertMutable() - var ov *otlpmetrics.NumberDataPoint_AsDouble + var ov *internal.NumberDataPoint_AsDouble if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.NumberDataPoint_AsDouble{} + ov = &internal.NumberDataPoint_AsDouble{} } else { - ov = internal.ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble) + ov = internal.ProtoPoolNumberDataPoint_AsDouble.Get().(*internal.NumberDataPoint_AsDouble) } ov.AsDouble = v ms.orig.Value = ov -} - -// IntValue returns the int associated with this NumberDataPoint. +} // IntValue returns the int associated with this NumberDataPoint. func (ms NumberDataPoint) IntValue() int64 { return ms.orig.GetAsInt() } @@ -114,11 +111,11 @@ func (ms NumberDataPoint) IntValue() int64 { // SetIntValue replaces the int associated with this NumberDataPoint. func (ms NumberDataPoint) SetIntValue(v int64) { ms.state.AssertMutable() - var ov *otlpmetrics.NumberDataPoint_AsInt + var ov *internal.NumberDataPoint_AsInt if !internal.UseProtoPooling.IsEnabled() { - ov = &otlpmetrics.NumberDataPoint_AsInt{} + ov = &internal.NumberDataPoint_AsInt{} } else { - ov = internal.ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt) + ov = internal.ProtoPoolNumberDataPoint_AsInt.Get().(*internal.NumberDataPoint_AsInt) } ov.AsInt = v ms.orig.Value = ov @@ -143,5 +140,5 @@ func (ms NumberDataPoint) SetFlags(v DataPointFlags) { // CopyTo copies all properties from the current struct overriding the destination. func (ms NumberDataPoint) CopyTo(dest NumberDataPoint) { dest.state.AssertMutable() - internal.CopyOrigNumberDataPoint(dest.orig, ms.orig) + internal.CopyNumberDataPoint(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go index a691f92026d..0daafa7e7a4 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // NumberDataPointSlice logically represents a slice of NumberDataPoint. @@ -22,18 +21,18 @@ import ( // Must use NewNumberDataPointSlice function to create new instances. // Important: zero-initialized instance is not valid for use. 
type NumberDataPointSlice struct { - orig *[]*otlpmetrics.NumberDataPoint + orig *[]*internal.NumberDataPoint state *internal.State } -func newNumberDataPointSlice(orig *[]*otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPointSlice { +func newNumberDataPointSlice(orig *[]*internal.NumberDataPoint, state *internal.State) NumberDataPointSlice { return NumberDataPointSlice{orig: orig, state: state} } -// NewNumberDataPointSlice creates a NumberDataPointSlice with 0 elements. +// NewNumberDataPointSlice creates a NumberDataPointSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewNumberDataPointSlice() NumberDataPointSlice { - orig := []*otlpmetrics.NumberDataPoint(nil) + orig := []*internal.NumberDataPoint(nil) return newNumberDataPointSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es NumberDataPointSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpmetrics.NumberDataPoint, len(*es.orig), newCap) + newOrig := make([]*internal.NumberDataPoint, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es NumberDataPointSlice) EnsureCapacity(newCap int) { // It returns the newly added NumberDataPoint. func (es NumberDataPointSlice) AppendEmpty() NumberDataPoint { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigNumberDataPoint()) + *es.orig = append(*es.orig, internal.NewNumberDataPoint()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigNumberDataPoint((*es.orig)[i], true) + internal.DeleteNumberDataPoint((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es NumberDataPointSlice) CopyTo(dest NumberDataPointSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigNumberDataPointSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyNumberDataPointPtrSlice(*dest.orig, *es.orig) } // Sort sorts the NumberDataPoint elements within NumberDataPointSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go index 51cd24b74d3..a7c3dbc061e 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewResourceMetrics function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceMetrics struct { - orig *otlpmetrics.ResourceMetrics + orig *internal.ResourceMetrics state *internal.State } -func newResourceMetrics(orig *otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetrics { +func newResourceMetrics(orig *internal.ResourceMetrics, state *internal.State) ResourceMetrics { return ResourceMetrics{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newResourceMetrics(orig *otlpmetrics.ResourceMetrics, state *internal.State // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewResourceMetrics() ResourceMetrics { - return newResourceMetrics(internal.NewOrigResourceMetrics(), internal.NewState()) + return newResourceMetrics(internal.NewResourceMetrics(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms ResourceMetrics) MoveTo(dest ResourceMetrics) { if ms.orig == dest.orig { return } - internal.DeleteOrigResourceMetrics(dest.orig, false) + internal.DeleteResourceMetrics(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Resource returns the resource associated with this ResourceMetrics. func (ms ResourceMetrics) Resource() pcommon.Resource { - return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) + return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state)) } // ScopeMetrics returns the ScopeMetrics associated with this ResourceMetrics. @@ -73,5 +72,5 @@ func (ms ResourceMetrics) SetSchemaUrl(v string) { // CopyTo copies all properties from the current struct overriding the destination. func (ms ResourceMetrics) CopyTo(dest ResourceMetrics) { dest.state.AssertMutable() - internal.CopyOrigResourceMetrics(dest.orig, ms.orig) + internal.CopyResourceMetrics(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go index 52103da495e..d434ad3d24e 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ResourceMetricsSlice logically represents a slice of ResourceMetrics. @@ -22,18 +21,18 @@ import ( // Must use NewResourceMetricsSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceMetricsSlice struct { - orig *[]*otlpmetrics.ResourceMetrics + orig *[]*internal.ResourceMetrics state *internal.State } -func newResourceMetricsSlice(orig *[]*otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetricsSlice { +func newResourceMetricsSlice(orig *[]*internal.ResourceMetrics, state *internal.State) ResourceMetricsSlice { return ResourceMetricsSlice{orig: orig, state: state} } -// NewResourceMetricsSlice creates a ResourceMetricsSlice with 0 elements. +// NewResourceMetricsSlice creates a ResourceMetricsSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewResourceMetricsSlice() ResourceMetricsSlice { - orig := []*otlpmetrics.ResourceMetrics(nil) + orig := []*internal.ResourceMetrics(nil) return newResourceMetricsSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ResourceMetricsSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpmetrics.ResourceMetrics, len(*es.orig), newCap) + newOrig := make([]*internal.ResourceMetrics, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ResourceMetricsSlice) EnsureCapacity(newCap int) { // It returns the newly added ResourceMetrics. 
func (es ResourceMetricsSlice) AppendEmpty() ResourceMetrics { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigResourceMetrics()) + *es.orig = append(*es.orig, internal.NewResourceMetrics()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigResourceMetrics((*es.orig)[i], true) + internal.DeleteResourceMetrics((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ResourceMetricsSlice) CopyTo(dest ResourceMetricsSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigResourceMetricsSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyResourceMetricsPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ResourceMetrics elements within ResourceMetricsSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go index 0050ee5a602..563153a491f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewScopeMetrics function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeMetrics struct { - orig *otlpmetrics.ScopeMetrics + orig *internal.ScopeMetrics state *internal.State } -func newScopeMetrics(orig *otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetrics { +func newScopeMetrics(orig *internal.ScopeMetrics, state *internal.State) ScopeMetrics { return ScopeMetrics{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newScopeMetrics(orig *otlpmetrics.ScopeMetrics, state *internal.State) Scop // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewScopeMetrics() ScopeMetrics { - return newScopeMetrics(internal.NewOrigScopeMetrics(), internal.NewState()) + return newScopeMetrics(internal.NewScopeMetrics(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms ScopeMetrics) MoveTo(dest ScopeMetrics) { if ms.orig == dest.orig { return } - internal.DeleteOrigScopeMetrics(dest.orig, false) + internal.DeleteScopeMetrics(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Scope returns the scope associated with this ScopeMetrics. func (ms ScopeMetrics) Scope() pcommon.InstrumentationScope { - return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) + return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state)) } // Metrics returns the Metrics associated with this ScopeMetrics. @@ -73,5 +72,5 @@ func (ms ScopeMetrics) SetSchemaUrl(v string) { // CopyTo copies all properties from the current struct overriding the destination. 
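ResourceMetrics and ScopeMetrics follow the same rename, and MoveTo now releases the destination's previous contents via the internal Delete helper before swapping the underlying structs. A hedged sketch of the unchanged public API (the schema URL is illustrative only):

package main

import "go.opentelemetry.io/collector/pdata/pmetric"

func main() {
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")
	sm := rm.ScopeMetrics().AppendEmpty()
	sm.Scope().SetName("example-scope")

	// MoveTo frees dest's previous contents back to the pool, then swaps
	// the underlying structs, leaving the source zeroed.
	dest := pmetric.NewResourceMetrics()
	rm.MoveTo(dest)
	_ = dest.ScopeMetrics().Len() // 1
}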
func (ms ScopeMetrics) CopyTo(dest ScopeMetrics) { dest.state.AssertMutable() - internal.CopyOrigScopeMetrics(dest.orig, ms.orig) + internal.CopyScopeMetrics(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go index 7850d9e7253..553e38a3c4c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ScopeMetricsSlice logically represents a slice of ScopeMetrics. @@ -22,18 +21,18 @@ import ( // Must use NewScopeMetricsSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeMetricsSlice struct { - orig *[]*otlpmetrics.ScopeMetrics + orig *[]*internal.ScopeMetrics state *internal.State } -func newScopeMetricsSlice(orig *[]*otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetricsSlice { +func newScopeMetricsSlice(orig *[]*internal.ScopeMetrics, state *internal.State) ScopeMetricsSlice { return ScopeMetricsSlice{orig: orig, state: state} } -// NewScopeMetricsSlice creates a ScopeMetricsSlice with 0 elements. +// NewScopeMetricsSlice creates a ScopeMetricsSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewScopeMetricsSlice() ScopeMetricsSlice { - orig := []*otlpmetrics.ScopeMetrics(nil) + orig := []*internal.ScopeMetrics(nil) return newScopeMetricsSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ScopeMetricsSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpmetrics.ScopeMetrics, len(*es.orig), newCap) + newOrig := make([]*internal.ScopeMetrics, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ScopeMetricsSlice) EnsureCapacity(newCap int) { // It returns the newly added ScopeMetrics. 
func (es ScopeMetricsSlice) AppendEmpty() ScopeMetrics { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigScopeMetrics()) + *es.orig = append(*es.orig, internal.NewScopeMetrics()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigScopeMetrics((*es.orig)[i], true) + internal.DeleteScopeMetrics((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ScopeMetricsSlice) CopyTo(dest ScopeMetricsSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigScopeMetricsSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyScopeMetricsPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ScopeMetrics elements within ScopeMetricsSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go index 0f5b7f11323..a56ece7dc53 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval. @@ -19,11 +18,11 @@ import ( // Must use NewSum function to create new instances. // Important: zero-initialized instance is not valid for use. type Sum struct { - orig *otlpmetrics.Sum + orig *internal.Sum state *internal.State } -func newSum(orig *otlpmetrics.Sum, state *internal.State) Sum { +func newSum(orig *internal.Sum, state *internal.State) Sum { return Sum{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newSum(orig *otlpmetrics.Sum, state *internal.State) Sum { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSum() Sum { - return newSum(internal.NewOrigSum(), internal.NewState()) + return newSum(internal.NewSum(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms Sum) MoveTo(dest Sum) { if ms.orig == dest.orig { return } - internal.DeleteOrigSum(dest.orig, false) + internal.DeleteSum(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -61,7 +60,7 @@ func (ms Sum) AggregationTemporality() AggregationTemporality { // SetAggregationTemporality replaces the aggregationtemporality associated with this Sum. func (ms Sum) SetAggregationTemporality(v AggregationTemporality) { ms.state.AssertMutable() - ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) + ms.orig.AggregationTemporality = internal.AggregationTemporality(v) } // IsMonotonic returns the ismonotonic associated with this Sum. @@ -78,5 +77,5 @@ func (ms Sum) SetIsMonotonic(v bool) { // CopyTo copies all properties from the current struct overriding the destination. 
func (ms Sum) CopyTo(dest Sum) { dest.state.AssertMutable() - internal.CopyOrigSum(dest.orig, ms.orig) + internal.CopySum(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go index e247537dc20..c6ec844d4d6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval. @@ -19,11 +18,11 @@ import ( // Must use NewSummary function to create new instances. // Important: zero-initialized instance is not valid for use. type Summary struct { - orig *otlpmetrics.Summary + orig *internal.Summary state *internal.State } -func newSummary(orig *otlpmetrics.Summary, state *internal.State) Summary { +func newSummary(orig *internal.Summary, state *internal.State) Summary { return Summary{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newSummary(orig *otlpmetrics.Summary, state *internal.State) Summary { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSummary() Summary { - return newSummary(internal.NewOrigSummary(), internal.NewState()) + return newSummary(internal.NewSummary(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms Summary) MoveTo(dest Summary) { if ms.orig == dest.orig { return } - internal.DeleteOrigSummary(dest.orig, false) + internal.DeleteSummary(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -56,5 +55,5 @@ func (ms Summary) DataPoints() SummaryDataPointSlice { // CopyTo copies all properties from the current struct overriding the destination. func (ms Summary) CopyTo(dest Summary) { dest.state.AssertMutable() - internal.CopyOrigSummary(dest.orig, ms.orig) + internal.CopySummary(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go index 7d4fa34db0d..715ef7247d1 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewSummaryDataPoint function to create new instances. // Important: zero-initialized instance is not valid for use. 
type SummaryDataPoint struct { - orig *otlpmetrics.SummaryDataPoint + orig *internal.SummaryDataPoint state *internal.State } -func newSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPoint { +func newSummaryDataPoint(orig *internal.SummaryDataPoint, state *internal.State) SummaryDataPoint { return SummaryDataPoint{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, state *internal.Sta // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSummaryDataPoint() SummaryDataPoint { - return newSummaryDataPoint(internal.NewOrigSummaryDataPoint(), internal.NewState()) + return newSummaryDataPoint(internal.NewSummaryDataPoint(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms SummaryDataPoint) MoveTo(dest SummaryDataPoint) { if ms.orig == dest.orig { return } - internal.DeleteOrigSummaryDataPoint(dest.orig, false) + internal.DeleteSummaryDataPoint(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Attributes returns the Attributes associated with this SummaryDataPoint. func (ms SummaryDataPoint) Attributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state)) } // StartTimestamp returns the starttimestamp associated with this SummaryDataPoint. @@ -117,5 +116,5 @@ func (ms SummaryDataPoint) SetFlags(v DataPointFlags) { // CopyTo copies all properties from the current struct overriding the destination. func (ms SummaryDataPoint) CopyTo(dest SummaryDataPoint) { dest.state.AssertMutable() - internal.CopyOrigSummaryDataPoint(dest.orig, ms.orig) + internal.CopySummaryDataPoint(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go index 26b52e75fe9..0a76d5ec332 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // SummaryDataPointSlice logically represents a slice of SummaryDataPoint. @@ -22,18 +21,18 @@ import ( // Must use NewSummaryDataPointSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type SummaryDataPointSlice struct { - orig *[]*otlpmetrics.SummaryDataPoint + orig *[]*internal.SummaryDataPoint state *internal.State } -func newSummaryDataPointSlice(orig *[]*otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPointSlice { +func newSummaryDataPointSlice(orig *[]*internal.SummaryDataPoint, state *internal.State) SummaryDataPointSlice { return SummaryDataPointSlice{orig: orig, state: state} } -// NewSummaryDataPointSlice creates a SummaryDataPointSlice with 0 elements. +// NewSummaryDataPointSlice creates a SummaryDataPointSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. 
func NewSummaryDataPointSlice() SummaryDataPointSlice { - orig := []*otlpmetrics.SummaryDataPoint(nil) + orig := []*internal.SummaryDataPoint(nil) return newSummaryDataPointSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es SummaryDataPointSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpmetrics.SummaryDataPoint, len(*es.orig), newCap) + newOrig := make([]*internal.SummaryDataPoint, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es SummaryDataPointSlice) EnsureCapacity(newCap int) { // It returns the newly added SummaryDataPoint. func (es SummaryDataPointSlice) AppendEmpty() SummaryDataPoint { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigSummaryDataPoint()) + *es.orig = append(*es.orig, internal.NewSummaryDataPoint()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigSummaryDataPoint((*es.orig)[i], true) + internal.DeleteSummaryDataPoint((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es SummaryDataPointSlice) CopyTo(dest SummaryDataPointSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigSummaryDataPointSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopySummaryDataPointPtrSlice(*dest.orig, *es.orig) } // Sort sorts the SummaryDataPoint elements within SummaryDataPointSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go index 093c1e45592..8275d2ecefa 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go @@ -8,7 +8,6 @@ package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // SummaryDataPointValueAtQuantile is a quantile value within a Summary data point. @@ -19,11 +18,11 @@ import ( // Must use NewSummaryDataPointValueAtQuantile function to create new instances. // Important: zero-initialized instance is not valid for use. type SummaryDataPointValueAtQuantile struct { - orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile + orig *internal.SummaryDataPointValueAtQuantile state *internal.State } -func newSummaryDataPointValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantile { +func newSummaryDataPointValueAtQuantile(orig *internal.SummaryDataPointValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantile { return SummaryDataPointValueAtQuantile{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newSummaryDataPointValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_Value // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewSummaryDataPointValueAtQuantile() SummaryDataPointValueAtQuantile { - return newSummaryDataPointValueAtQuantile(internal.NewOrigSummaryDataPoint_ValueAtQuantile(), internal.NewState()) + return newSummaryDataPointValueAtQuantile(internal.NewSummaryDataPointValueAtQuantile(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms SummaryDataPointValueAtQuantile) MoveTo(dest SummaryDataPointValueAtQua if ms.orig == dest.orig { return } - internal.DeleteOrigSummaryDataPoint_ValueAtQuantile(dest.orig, false) + internal.DeleteSummaryDataPointValueAtQuantile(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -73,5 +72,5 @@ func (ms SummaryDataPointValueAtQuantile) SetValue(v float64) { // CopyTo copies all properties from the current struct overriding the destination. func (ms SummaryDataPointValueAtQuantile) CopyTo(dest SummaryDataPointValueAtQuantile) { dest.state.AssertMutable() - internal.CopyOrigSummaryDataPoint_ValueAtQuantile(dest.orig, ms.orig) + internal.CopySummaryDataPointValueAtQuantile(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go index 544bf5b125c..0aa50533057 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // SummaryDataPointValueAtQuantileSlice logically represents a slice of SummaryDataPointValueAtQuantile. @@ -22,18 +21,18 @@ import ( // Must use NewSummaryDataPointValueAtQuantileSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type SummaryDataPointValueAtQuantileSlice struct { - orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile + orig *[]*internal.SummaryDataPointValueAtQuantile state *internal.State } -func newSummaryDataPointValueAtQuantileSlice(orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantileSlice { +func newSummaryDataPointValueAtQuantileSlice(orig *[]*internal.SummaryDataPointValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantileSlice { return SummaryDataPointValueAtQuantileSlice{orig: orig, state: state} } -// NewSummaryDataPointValueAtQuantileSlice creates a SummaryDataPointValueAtQuantileSlice with 0 elements. +// NewSummaryDataPointValueAtQuantileSlice creates a SummaryDataPointValueAtQuantileSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. 
func NewSummaryDataPointValueAtQuantileSlice() SummaryDataPointValueAtQuantileSlice { - orig := []*otlpmetrics.SummaryDataPoint_ValueAtQuantile(nil) + orig := []*internal.SummaryDataPointValueAtQuantile(nil) return newSummaryDataPointValueAtQuantileSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es SummaryDataPointValueAtQuantileSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(*es.orig), newCap) + newOrig := make([]*internal.SummaryDataPointValueAtQuantile, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es SummaryDataPointValueAtQuantileSlice) EnsureCapacity(newCap int) { // It returns the newly added SummaryDataPointValueAtQuantile. func (es SummaryDataPointValueAtQuantileSlice) AppendEmpty() SummaryDataPointValueAtQuantile { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigSummaryDataPoint_ValueAtQuantile()) + *es.orig = append(*es.orig, internal.NewSummaryDataPointValueAtQuantile()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointV newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigSummaryDataPoint_ValueAtQuantile((*es.orig)[i], true) + internal.DeleteSummaryDataPointValueAtQuantile((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es SummaryDataPointValueAtQuantileSlice) CopyTo(dest SummaryDataPointValue if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigSummaryDataPoint_ValueAtQuantileSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopySummaryDataPointValueAtQuantilePtrSlice(*dest.orig, *es.orig) } // Sort sorts the SummaryDataPointValueAtQuantile elements within SummaryDataPointValueAtQuantileSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go index 48913985974..cd3825f7f13 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go @@ -6,7 +6,6 @@ package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" import ( "slices" - "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" ) @@ -20,7 +19,7 @@ type JSONMarshaler struct{} func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportMetricsServiceRequest(md.getOrig(), dest) + md.getOrig().MarshalJSON(dest) if dest.Error() != nil { return nil, dest.Error() } @@ -35,7 +34,7 @@ func (*JSONUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) { iter := json.BorrowIterator(buf) defer json.ReturnIterator(iter) md := NewMetrics() - internal.UnmarshalJSONOrigExportMetricsServiceRequest(md.getOrig(), iter) + md.getOrig().UnmarshalJSON(iter) if iter.Error() != nil { return Metrics{}, iter.Error() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go index b9a8564d2b8..bf703a764a7 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go @@ -3,58 +3,54 @@ package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" -import ( - "go.opentelemetry.io/collector/pdata/internal" -) - var _ MarshalSizer = 
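In json.go the marshaling entry points move from internal free functions to MarshalJSON/UnmarshalJSON methods on the internal request type, which drops the internal import. The exported JSONMarshaler/JSONUnmarshaler contract is the same; round-trip sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	md.ResourceMetrics().AppendEmpty()

	var m pmetric.JSONMarshaler
	buf, err := m.MarshalMetrics(md)
	if err != nil {
		panic(err)
	}

	var u pmetric.JSONUnmarshaler
	back, err := u.UnmarshalMetrics(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.ResourceMetrics().Len()) // 1
}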
(*ProtoMarshaler)(nil) type ProtoMarshaler struct{} func (e *ProtoMarshaler) MarshalMetrics(md Metrics) ([]byte, error) { - size := internal.SizeProtoOrigExportMetricsServiceRequest(md.getOrig()) + size := md.getOrig().SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportMetricsServiceRequest(md.getOrig(), buf) + _ = md.getOrig().MarshalProto(buf) return buf, nil } func (e *ProtoMarshaler) MetricsSize(md Metrics) int { - return internal.SizeProtoOrigExportMetricsServiceRequest(md.getOrig()) + return md.getOrig().SizeProto() } func (e *ProtoMarshaler) ResourceMetricsSize(md ResourceMetrics) int { - return internal.SizeProtoOrigResourceMetrics(md.orig) + return md.orig.SizeProto() } func (e *ProtoMarshaler) ScopeMetricsSize(md ScopeMetrics) int { - return internal.SizeProtoOrigScopeMetrics(md.orig) + return md.orig.SizeProto() } func (e *ProtoMarshaler) MetricSize(md Metric) int { - return internal.SizeProtoOrigMetric(md.orig) + return md.orig.SizeProto() } func (e *ProtoMarshaler) NumberDataPointSize(md NumberDataPoint) int { - return internal.SizeProtoOrigNumberDataPoint(md.orig) + return md.orig.SizeProto() } func (e *ProtoMarshaler) SummaryDataPointSize(md SummaryDataPoint) int { - return internal.SizeProtoOrigSummaryDataPoint(md.orig) + return md.orig.SizeProto() } func (e *ProtoMarshaler) HistogramDataPointSize(md HistogramDataPoint) int { - return internal.SizeProtoOrigHistogramDataPoint(md.orig) + return md.orig.SizeProto() } func (e *ProtoMarshaler) ExponentialHistogramDataPointSize(md ExponentialHistogramDataPoint) int { - return internal.SizeProtoOrigExponentialHistogramDataPoint(md.orig) + return md.orig.SizeProto() } type ProtoUnmarshaler struct{} func (d *ProtoUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) { md := NewMetrics() - err := internal.UnmarshalProtoOrigExportMetricsServiceRequest(md.getOrig(), buf) + err := md.getOrig().UnmarshalProto(buf) if err != nil { return Metrics{}, err } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go index bebd88e32ce..df57abed3a8 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go @@ -8,7 +8,6 @@ package pmetricotlp import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" ) // ExportPartialSuccess represents the details of a partially successful export request. @@ -19,11 +18,11 @@ import ( // Must use NewExportPartialSuccess function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportPartialSuccess struct { - orig *otlpcollectormetrics.ExportMetricsPartialSuccess + orig *internal.ExportMetricsPartialSuccess state *internal.State } -func newExportPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, state *internal.State) ExportPartialSuccess { +func newExportPartialSuccess(orig *internal.ExportMetricsPartialSuccess, state *internal.State) ExportPartialSuccess { return ExportPartialSuccess{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newExportPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSucc // This must be used only in testing code. 
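pb.go likewise swaps the internal.SizeProtoOrig*/MarshalProtoOrig* helpers for SizeProto/MarshalProto methods on the orig, letting the file drop its internal import entirely. Public usage stays as before; a sketch, noting that the sizer matches the encoded length because MarshalMetrics allocates exactly size bytes:

package main

import "go.opentelemetry.io/collector/pdata/pmetric"

func main() {
	md := pmetric.NewMetrics()
	md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()

	var m pmetric.ProtoMarshaler
	size := m.MetricsSize(md) // delegates to orig.SizeProto()
	buf, err := m.MarshalMetrics(md)
	if err != nil {
		panic(err)
	}
	_ = size == len(buf) // true: the buffer is sized by SizeProto up front

	var u pmetric.ProtoUnmarshaler
	if _, err := u.UnmarshalMetrics(buf); err != nil {
		panic(err)
	}
}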
Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportPartialSuccess() ExportPartialSuccess { - return newExportPartialSuccess(internal.NewOrigExportMetricsPartialSuccess(), internal.NewState()) + return newExportPartialSuccess(internal.NewExportMetricsPartialSuccess(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { if ms.orig == dest.orig { return } - internal.DeleteOrigExportMetricsPartialSuccess(dest.orig, false) + internal.DeleteExportMetricsPartialSuccess(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -73,5 +72,5 @@ func (ms ExportPartialSuccess) SetErrorMessage(v string) { // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { dest.state.AssertMutable() - internal.CopyOrigExportMetricsPartialSuccess(dest.orig, ms.orig) + internal.CopyExportMetricsPartialSuccess(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportresponse.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportresponse.go index 5592ce857b3..53323987f1b 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportresponse.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportresponse.go @@ -8,7 +8,6 @@ package pmetricotlp import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" ) // ExportResponse represents the response for gRPC/HTTP client/server. @@ -19,11 +18,11 @@ import ( // Must use NewExportResponse function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportResponse struct { - orig *otlpcollectormetrics.ExportMetricsServiceResponse + orig *internal.ExportMetricsServiceResponse state *internal.State } -func newExportResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, state *internal.State) ExportResponse { +func newExportResponse(orig *internal.ExportMetricsServiceResponse, state *internal.State) ExportResponse { return ExportResponse{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newExportResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportResponse() ExportResponse { - return newExportResponse(internal.NewOrigExportMetricsServiceResponse(), internal.NewState()) + return newExportResponse(internal.NewExportMetricsServiceResponse(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ExportResponse) MoveTo(dest ExportResponse) { if ms.orig == dest.orig { return } - internal.DeleteOrigExportMetricsServiceResponse(dest.orig, false) + internal.DeleteExportMetricsServiceResponse(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -56,5 +55,5 @@ func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { // CopyTo copies all properties from the current struct overriding the destination. 
func (ms ExportResponse) CopyTo(dest ExportResponse) { dest.state.AssertMutable() - internal.CopyOrigExportMetricsServiceResponse(dest.orig, ms.orig) + internal.CopyExportMetricsServiceResponse(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go index 105ac451bd3..656f0e17650 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go @@ -11,8 +11,7 @@ import ( "google.golang.org/grpc/status" "go.opentelemetry.io/collector/pdata/internal" - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" - _ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // enforces custom gRPC encoding to be loaded. + "go.opentelemetry.io/collector/pdata/internal/otelgrpc" "go.opentelemetry.io/collector/pdata/internal/otlp" ) @@ -32,11 +31,11 @@ type GRPCClient interface { // NewGRPCClient returns a new GRPCClient connected using the given connection. func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { - return &grpcClient{rawClient: otlpcollectormetrics.NewMetricsServiceClient(cc)} + return &grpcClient{rawClient: otelgrpc.NewMetricsServiceClient(cc)} } type grpcClient struct { - rawClient otlpcollectormetrics.MetricsServiceClient + rawClient otelgrpc.MetricsServiceClient } func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { @@ -75,14 +74,14 @@ func (*UnimplementedGRPCServer) unexported() {} // RegisterGRPCServer registers the GRPCServer to the grpc.Server. func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { - otlpcollectormetrics.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv}) + otelgrpc.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv}) } type rawMetricsServer struct { srv GRPCServer } -func (s rawMetricsServer) Export(ctx context.Context, request *otlpcollectormetrics.ExportMetricsServiceRequest) (*otlpcollectormetrics.ExportMetricsServiceResponse, error) { +func (s rawMetricsServer) Export(ctx context.Context, request *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error) { otlp.MigrateMetrics(request.ResourceMetrics) rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()}) return rsp.orig, err diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go index e8eb742a96b..895c0681834 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go @@ -7,7 +7,6 @@ import ( "slices" "go.opentelemetry.io/collector/pdata/internal" - otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" "go.opentelemetry.io/collector/pdata/pmetric" @@ -16,14 +15,14 @@ import ( // ExportRequest represents the request for gRPC/HTTP client/server. // It's a wrapper for pmetric.Metrics data. type ExportRequest struct { - orig *otlpcollectormetrics.ExportMetricsServiceRequest + orig *internal.ExportMetricsServiceRequest state *internal.State } // NewExportRequest returns an empty ExportRequest. 
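grpc.go now builds on the internal/otelgrpc service stubs instead of the protogen client plus the side-effect grpcencoding import. Client construction is unchanged for users; a sketch where the endpoint and insecure credentials are illustrative assumptions only:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)

func main() {
	// Assumed local collector endpoint; use real credentials in production.
	conn, err := grpc.NewClient("localhost:4317",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := pmetricotlp.NewGRPCClient(conn)
	resp, err := client.Export(context.Background(), pmetricotlp.NewExportRequest())
	if err != nil {
		panic(err)
	}
	_ = resp.PartialSuccess().RejectedDataPoints()
}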
func NewExportRequest() ExportRequest { return ExportRequest{ - orig: &otlpcollectormetrics.ExportMetricsServiceRequest{}, + orig: &internal.ExportMetricsServiceRequest{}, state: internal.NewState(), } } @@ -33,22 +32,22 @@ func NewExportRequest() ExportRequest { // any changes to the provided Metrics struct will be reflected in the ExportRequest and vice versa. func NewExportRequestFromMetrics(md pmetric.Metrics) ExportRequest { return ExportRequest{ - orig: internal.GetOrigMetrics(internal.Metrics(md)), - state: internal.GetMetricsState(internal.Metrics(md)), + orig: internal.GetMetricsOrig(internal.MetricsWrapper(md)), + state: internal.GetMetricsState(internal.MetricsWrapper(md)), } } // MarshalProto marshals ExportRequest into proto bytes. func (ms ExportRequest) MarshalProto() ([]byte, error) { - size := internal.SizeProtoOrigExportMetricsServiceRequest(ms.orig) + size := ms.orig.SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportMetricsServiceRequest(ms.orig, buf) + _ = ms.orig.MarshalProto(buf) return buf, nil } // UnmarshalProto unmarshalls ExportRequest from proto bytes. func (ms ExportRequest) UnmarshalProto(data []byte) error { - err := internal.UnmarshalProtoOrigExportMetricsServiceRequest(ms.orig, data) + err := ms.orig.UnmarshalProto(data) if err != nil { return err } @@ -60,7 +59,7 @@ func (ms ExportRequest) UnmarshalProto(data []byte) error { func (ms ExportRequest) MarshalJSON() ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportMetricsServiceRequest(ms.orig, dest) + ms.orig.MarshalJSON(dest) if dest.Error() != nil { return nil, dest.Error() } @@ -71,10 +70,10 @@ func (ms ExportRequest) MarshalJSON() ([]byte, error) { func (ms ExportRequest) UnmarshalJSON(data []byte) error { iter := json.BorrowIterator(data) defer json.ReturnIterator(iter) - internal.UnmarshalJSONOrigExportMetricsServiceRequest(ms.orig, iter) + ms.orig.UnmarshalJSON(iter) return iter.Error() } func (ms ExportRequest) Metrics() pmetric.Metrics { - return pmetric.Metrics(internal.NewMetrics(ms.orig, ms.state)) + return pmetric.Metrics(internal.NewMetricsWrapper(ms.orig, ms.state)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go index b34d5350f80..fe985edf2da 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go @@ -6,28 +6,27 @@ package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetr import ( "slices" - "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/json" ) // MarshalProto marshals ExportResponse into proto bytes. func (ms ExportResponse) MarshalProto() ([]byte, error) { - size := internal.SizeProtoOrigExportMetricsServiceResponse(ms.orig) + size := ms.orig.SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportMetricsServiceResponse(ms.orig, buf) + _ = ms.orig.MarshalProto(buf) return buf, nil } // UnmarshalProto unmarshalls ExportResponse from proto bytes. func (ms ExportResponse) UnmarshalProto(data []byte) error { - return internal.UnmarshalProtoOrigExportMetricsServiceResponse(ms.orig, data) + return ms.orig.UnmarshalProto(data) } // MarshalJSON marshals ExportResponse into JSON bytes. 
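request.go keeps ExportRequest as a thin wrapper; NewExportRequestFromMetrics still shares state with the pmetric.Metrics it wraps (now via internal.MetricsWrapper), and marshaling goes through the orig's methods. Round-trip sketch with exported API only:

package main

import (
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)

func main() {
	md := pmetric.NewMetrics()
	md.ResourceMetrics().AppendEmpty()

	// Shares the underlying data with md: mutations are visible on both sides.
	req := pmetricotlp.NewExportRequestFromMetrics(md)

	buf, err := req.MarshalProto() // orig.SizeProto() + orig.MarshalProto(buf)
	if err != nil {
		panic(err)
	}

	back := pmetricotlp.NewExportRequest()
	if err := back.UnmarshalProto(buf); err != nil {
		panic(err)
	}
	_ = back.Metrics().ResourceMetrics().Len() // 1
}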
func (ms ExportResponse) MarshalJSON() ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportMetricsServiceResponse(ms.orig, dest) + ms.orig.MarshalJSON(dest) return slices.Clone(dest.Buffer()), dest.Error() } @@ -35,6 +34,6 @@ func (ms ExportResponse) MarshalJSON() ([]byte, error) { func (ms ExportResponse) UnmarshalJSON(data []byte) error { iter := json.BorrowIterator(data) defer json.ReturnIterator(iter) - internal.UnmarshalJSONOrigExportMetricsServiceResponse(ms.orig, iter) + ms.orig.UnmarshalJSON(iter) return iter.Error() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/aggregation_temporality.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/aggregation_temporality.go index b79f8b04097..8855f1f6be6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/aggregation_temporality.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/aggregation_temporality.go @@ -4,24 +4,34 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" import ( - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal" ) // AggregationTemporality specifies the method of aggregating metric values, // either DELTA (change since last report) or CUMULATIVE (total since a fixed // start time). +// +// Deprecated: [v0.146.0] Type was removed without replacement in the Profiles signal. type AggregationTemporality int32 const ( // AggregationTemporalityUnspecified is the default AggregationTemporality, it MUST NOT be used. - AggregationTemporalityUnspecified = AggregationTemporality(otlpprofiles.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED) + // + // Deprecated: [v0.146.0] This is no longer supported by the Profiles signal. + AggregationTemporalityUnspecified = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED) // AggregationTemporalityDelta is a AggregationTemporality for a metric aggregator which reports changes since last report time. - AggregationTemporalityDelta = AggregationTemporality(otlpprofiles.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA) + // + // Deprecated: [v0.146.0] This is no longer supported by the Profiles signal. + AggregationTemporalityDelta = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA) // AggregationTemporalityCumulative is a AggregationTemporality for a metric aggregator which reports changes since a fixed start time. - AggregationTemporalityCumulative = AggregationTemporality(otlpprofiles.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE) + // + // Deprecated: [v0.146.0] This is no longer supported by the Profiles signal. + AggregationTemporalityCumulative = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE) ) // String returns the string representation of the AggregationTemporality. +// +// Deprecated: [v0.146.0] Type was removed without replacement in the Profiles signal. 
func (at AggregationTemporality) String() string { switch at { case AggregationTemporalityUnspecified: diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/attributes.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/attributes.go index fb004ac2baf..7ba37948ae0 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/attributes.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/attributes.go @@ -5,7 +5,6 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" import ( "errors" - "fmt" "math" "go.opentelemetry.io/collector/pdata/pcommon" @@ -34,96 +33,6 @@ func FromAttributeIndices(table KeyValueAndUnitSlice, record attributable, dic P var errTooManyAttributeTableEntries = errors.New("too many entries in AttributeTable") -// PutAttribute updates an AttributeTable and a record's AttributeIndices to -// add or update an attribute. -// The assumption is that attributes are a map as for other signals (metrics, logs, etc.), thus -// the same key must not appear twice in a list of attributes / attribute indices. -// The record can be any struct that implements an `AttributeIndices` method. -// -// Deprecated: [v0.138.0] use SetAttribute instead. -func PutAttribute(table KeyValueAndUnitSlice, record attributable, dic ProfilesDictionary, key string, value pcommon.Value) error { - for i := range record.AttributeIndices().Len() { - idx := int(record.AttributeIndices().At(i)) - if idx < 0 || idx >= table.Len() { - return fmt.Errorf("index value %d out of range in AttributeIndices[%d]", idx, i) - } - attr := table.At(idx) - - if dic.StringTable().At(int(attr.KeyStrindex())) == key { - if attr.Value().Equal(value) { - // Attribute already exists, nothing to do. - return nil - } - - // If the attribute table already contains the key/value pair, just update the index. - for j := range table.Len() { - a := table.At(j) - if dic.StringTable().At(int(a.KeyStrindex())) == key && a.Value().Equal(value) { - if j > math.MaxInt32 { - return errTooManyAttributeTableEntries - } - record.AttributeIndices().SetAt(i, int32(j)) //nolint:gosec // overflow checked - return nil - } - } - - if table.Len() >= math.MaxInt32 { - return errTooManyAttributeTableEntries - } - - // Find the key in the StringTable, or add it - keyID, err := SetString(dic.StringTable(), key) - if err != nil { - return err - } - - // Add the key/value pair as a new attribute to the table... - entry := table.AppendEmpty() - entry.SetKeyStrindex(keyID) - value.CopyTo(entry.Value()) - - // ...and update the existing index. - record.AttributeIndices().SetAt(i, int32(table.Len()-1)) //nolint:gosec // overflow checked - return nil - } - } - - if record.AttributeIndices().Len() >= math.MaxInt32 { - return errors.New("too many entries in AttributeIndices") - } - - for j := range table.Len() { - a := table.At(j) - if dic.StringTable().At(int(a.KeyStrindex())) == key && a.Value().Equal(value) { - if j > math.MaxInt32 { - return errTooManyAttributeTableEntries - } - // Add the index of the existing attribute to the indices. - record.AttributeIndices().Append(int32(j)) //nolint:gosec // overflow checked - return nil - } - } - - if table.Len() >= math.MaxInt32 { - return errTooManyAttributeTableEntries - } - - // Find the key in the StringTable, or add it - keyID, err := SetString(dic.StringTable(), key) - if err != nil { - return err - } - - // Add the key/value pair as a new attribute to the table... 
- entry := table.AppendEmpty() - entry.SetKeyStrindex(keyID) - value.CopyTo(entry.Value()) - - // ...and add a new index to the indices. - record.AttributeIndices().Append(int32(table.Len() - 1)) //nolint:gosec // overflow checked - return nil -} - // SetAttribute updates an AttributeTable, adding or providing a value and // returns its index. func SetAttribute(table KeyValueAndUnitSlice, attr KeyValueAndUnit) (int32, error) { @@ -132,7 +41,7 @@ func SetAttribute(table KeyValueAndUnitSlice, attr KeyValueAndUnit) (int32, erro if j > math.MaxInt32 { return 0, errTooManyAttributeTableEntries } - return int32(j), nil //nolint:gosec // G115 overflow checked + return int32(j), nil } } @@ -141,5 +50,5 @@ func SetAttribute(table KeyValueAndUnitSlice, attr KeyValueAndUnit) (int32, erro } attr.CopyTo(table.AppendEmpty()) - return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked + return int32(table.Len() - 1), nil } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/function.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/function.go index 4da4e605333..10bb1af9126 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/function.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/function.go @@ -3,6 +3,8 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" +import "fmt" + // Equal checks equality with another Function func (fn Function) Equal(val Function) bool { return fn.NameStrindex() == val.NameStrindex() && @@ -10,3 +12,45 @@ func (fn Function) Equal(val Function) bool { fn.FilenameStrindex() == val.FilenameStrindex() && fn.StartLine() == val.StartLine() } + +// switchDictionary updates the Function, switching its indices from one +// dictionary to another. +func (fn Function) switchDictionary(src, dst ProfilesDictionary) error { + if fn.NameStrindex() > 0 { + if src.StringTable().Len() <= int(fn.NameStrindex()) { + return fmt.Errorf("invalid name index %d", fn.NameStrindex()) + } + + idx, err := SetString(dst.StringTable(), src.StringTable().At(int(fn.NameStrindex()))) + if err != nil { + return fmt.Errorf("couldn't set name: %w", err) + } + fn.SetNameStrindex(idx) + } + + if fn.SystemNameStrindex() > 0 { + if src.StringTable().Len() <= int(fn.SystemNameStrindex()) { + return fmt.Errorf("invalid system name index %d", fn.SystemNameStrindex()) + } + + idx, err := SetString(dst.StringTable(), src.StringTable().At(int(fn.SystemNameStrindex()))) + if err != nil { + return fmt.Errorf("couldn't set system name: %w", err) + } + fn.SetSystemNameStrindex(idx) + } + + if fn.FilenameStrindex() > 0 { + if src.StringTable().Len() <= int(fn.FilenameStrindex()) { + return fmt.Errorf("invalid filename index %d", fn.FilenameStrindex()) + } + + idx, err := SetString(dst.StringTable(), src.StringTable().At(int(fn.FilenameStrindex()))) + if err != nil { + return fmt.Errorf("couldn't set filename: %w", err) + } + fn.SetFilenameStrindex(idx) + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/functions.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/functions.go index b67e680f1e7..e97f182042c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/functions.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/functions.go @@ -5,43 +5,27 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" import ( "errors" - "fmt" "math" ) var errTooManyFunctionTableEntries = errors.New("too many entries in FunctionTable") -// SetFunction updates a FunctionTable and 
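With the deprecated PutAttribute removed above, callers migrate to the SetString + SetAttribute pair. A hypothetical migration sketch: pprofile.NewKeyValueAndUnit and its SetKeyStrindex/Value accessors are assumed from the generated API (they mirror the accessors the removed implementation used), and the old per-record key-deduplication logic is intentionally omitted for brevity:

package main

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pprofile"
)

// setRecordAttribute is a hypothetical helper replacing the removed PutAttribute.
func setRecordAttribute(dic pprofile.ProfilesDictionary, table pprofile.KeyValueAndUnitSlice,
	record interface{ AttributeIndices() pcommon.Int32Slice },
	key string, value pcommon.Value) error {

	// Intern the key in the dictionary's string table.
	keyIdx, err := pprofile.SetString(dic.StringTable(), key)
	if err != nil {
		return err
	}

	kv := pprofile.NewKeyValueAndUnit() // assumed generated constructor
	kv.SetKeyStrindex(keyIdx)
	value.CopyTo(kv.Value())

	// SetAttribute deduplicates against the table and returns the entry's index.
	idx, err := pprofile.SetAttribute(table, kv)
	if err != nil {
		return err
	}
	record.AttributeIndices().Append(idx)
	return nil
}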
a Line's FunctionIndex to -// add or update a function. -func SetFunction(table FunctionSlice, record Line, fn Function) error { - idx := int(record.FunctionIndex()) - if idx > 0 { - if idx >= table.Len() { - return fmt.Errorf("index value %d out of range for FunctionIndex", idx) - } - mapAt := table.At(idx) - if mapAt.Equal(fn) { - // Function already exists, nothing to do. - return nil - } - } - +// SetFunction updates a FunctionTable, adding or providing a value and returns +// its index. +func SetFunction(table FunctionSlice, fn Function) (int32, error) { for j, m := range table.All() { if m.Equal(fn) { if j > math.MaxInt32 { - return errTooManyFunctionTableEntries + return 0, errTooManyFunctionTableEntries } - // Add the index of the existing function to the indices. - record.SetFunctionIndex(int32(j)) //nolint:gosec // G115 overflow checked - return nil + return int32(j), nil } } if table.Len() >= math.MaxInt32 { - return errTooManyFunctionTableEntries + return 0, errTooManyFunctionTableEntries } fn.CopyTo(table.AppendEmpty()) - record.SetFunctionIndex(int32(table.Len() - 1)) //nolint:gosec // G115 overflow checked - return nil + return int32(table.Len() - 1), nil } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go deleted file mode 100644 index b3ee86d3b72..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package pprofile - -import ( - "go.opentelemetry.io/collector/pdata/internal" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - "go.opentelemetry.io/collector/pdata/pcommon" -) - -// Attribute describes an attribute stored in a profile's attribute table. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewAttribute function to create new instances. -// Important: zero-initialized instance is not valid for use. -type Attribute struct { - orig *otlpcommon.KeyValue - state *internal.State -} - -func newAttribute(orig *otlpcommon.KeyValue, state *internal.State) Attribute { - return Attribute{orig: orig, state: state} -} - -// NewAttribute creates a new empty Attribute. -// -// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, -// OR directly access the member if this is embedded in another struct. -func NewAttribute() Attribute { - return newAttribute(internal.NewOrigKeyValue(), internal.NewState()) -} - -// MoveTo moves all properties from the current struct overriding the destination and -// resetting the current instance to its zero value -func (ms Attribute) MoveTo(dest Attribute) { - ms.state.AssertMutable() - dest.state.AssertMutable() - // If they point to the same data, they are the same, nothing to do. - if ms.orig == dest.orig { - return - } - internal.DeleteOrigKeyValue(dest.orig, false) - *dest.orig, *ms.orig = *ms.orig, *dest.orig -} - -// Key returns the key associated with this Attribute. -func (ms Attribute) Key() string { - return ms.orig.Key -} - -// SetKey replaces the key associated with this Attribute. 
-func (ms Attribute) SetKey(v string) { - ms.state.AssertMutable() - ms.orig.Key = v -} - -// Value returns the value associated with this Attribute. -func (ms Attribute) Value() pcommon.Value { - return pcommon.Value(internal.NewValue(&ms.orig.Value, ms.state)) -} - -// CopyTo copies all properties from the current struct overriding the destination. -func (ms Attribute) CopyTo(dest Attribute) { - dest.state.AssertMutable() - internal.CopyOrigKeyValue(dest.orig, ms.orig) -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go deleted file mode 100644 index 6caf8a1b3da..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "make genpdata". - -package pprofile - -import ( - "iter" - - "go.opentelemetry.io/collector/pdata/internal" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" -) - -// AttributeTableSlice logically represents a slice of Attribute. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewAttributeTableSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type AttributeTableSlice struct { - orig *[]otlpcommon.KeyValue - state *internal.State -} - -func newAttributeTableSlice(orig *[]otlpcommon.KeyValue, state *internal.State) AttributeTableSlice { - return AttributeTableSlice{orig: orig, state: state} -} - -// NewAttributeTableSlice creates a AttributeTableSlice with 0 elements. -// Can use "EnsureCapacity" to initialize with a given capacity. -func NewAttributeTableSlice() AttributeTableSlice { - orig := []otlpcommon.KeyValue(nil) - return newAttributeTableSlice(&orig, internal.NewState()) -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewAttributeTableSlice()". -func (es AttributeTableSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es AttributeTableSlice) At(i int) Attribute { - return newAttribute(&(*es.orig)[i], es.state) -} - -// All returns an iterator over index-value pairs in the slice. -// -// for i, v := range es.All() { -// ... // Do something with index-value pair -// } -func (es AttributeTableSlice) All() iter.Seq2[int, Attribute] { - return func(yield func(int, Attribute) bool) { - for i := 0; i < es.Len(); i++ { - if !yield(i, es.At(i)) { - return - } - } - } -} - -// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. -// 1. If the newCap <= cap then no change in capacity. -// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. -// -// Here is how a new AttributeTableSlice can be initialized: -// -// es := NewAttributeTableSlice() -// es.EnsureCapacity(4) -// for i := 0; i < 4; i++ { -// e := es.AppendEmpty() -// // Here should set all the values for e. 
-// } -func (es AttributeTableSlice) EnsureCapacity(newCap int) { - es.state.AssertMutable() - oldCap := cap(*es.orig) - if newCap <= oldCap { - return - } - - newOrig := make([]otlpcommon.KeyValue, len(*es.orig), newCap) - copy(newOrig, *es.orig) - *es.orig = newOrig -} - -// AppendEmpty will append to the end of the slice an empty Attribute. -// It returns the newly added Attribute. -func (es AttributeTableSlice) AppendEmpty() Attribute { - es.state.AssertMutable() - *es.orig = append(*es.orig, otlpcommon.KeyValue{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es AttributeTableSlice) MoveAndAppendTo(dest AttributeTableSlice) { - es.state.AssertMutable() - dest.state.AssertMutable() - // If they point to the same data, they are the same, nothing to do. - if es.orig == dest.orig { - return - } - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es AttributeTableSlice) RemoveIf(f func(Attribute) bool) { - es.state.AssertMutable() - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - internal.DeleteOrigKeyValue(&(*es.orig)[i], false) - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - (*es.orig)[i].Reset() - newLen++ - } - *es.orig = (*es.orig)[:newLen] -} - -// CopyTo copies all elements from the current slice overriding the destination. -func (es AttributeTableSlice) CopyTo(dest AttributeTableSlice) { - dest.state.AssertMutable() - if es.orig == dest.orig { - return - } - *dest.orig = internal.CopyOrigKeyValueSlice(*dest.orig, *es.orig) -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go index ecfb5893fad..a8d52b47958 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // Function describes a function, including its human-readable name, system name, source file, and starting line number in the source. @@ -19,11 +18,11 @@ import ( // Must use NewFunction function to create new instances. // Important: zero-initialized instance is not valid for use. type Function struct { - orig *otlpprofiles.Function + orig *internal.Function state *internal.State } -func newFunction(orig *otlpprofiles.Function, state *internal.State) Function { +func newFunction(orig *internal.Function, state *internal.State) Function { return Function{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newFunction(orig *otlpprofiles.Function, state *internal.State) Function { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewFunction() Function { - return newFunction(internal.NewOrigFunction(), internal.NewState()) + return newFunction(internal.NewFunction(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms Function) MoveTo(dest Function) { if ms.orig == dest.orig { return } - internal.DeleteOrigFunction(dest.orig, false) + internal.DeleteFunction(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -95,5 +94,5 @@ func (ms Function) SetStartLine(v int64) { // CopyTo copies all properties from the current struct overriding the destination. func (ms Function) CopyTo(dest Function) { dest.state.AssertMutable() - internal.CopyOrigFunction(dest.orig, ms.orig) + internal.CopyFunction(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go index 054b7c21934..cc8f073773d 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // FunctionSlice logically represents a slice of Function. @@ -22,18 +21,18 @@ import ( // Must use NewFunctionSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type FunctionSlice struct { - orig *[]*otlpprofiles.Function + orig *[]*internal.Function state *internal.State } -func newFunctionSlice(orig *[]*otlpprofiles.Function, state *internal.State) FunctionSlice { +func newFunctionSlice(orig *[]*internal.Function, state *internal.State) FunctionSlice { return FunctionSlice{orig: orig, state: state} } -// NewFunctionSlice creates a FunctionSlice with 0 elements. +// NewFunctionSlice creates a FunctionSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewFunctionSlice() FunctionSlice { - orig := []*otlpprofiles.Function(nil) + orig := []*internal.Function(nil) return newFunctionSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es FunctionSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.Function, len(*es.orig), newCap) + newOrig := make([]*internal.Function, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es FunctionSlice) EnsureCapacity(newCap int) { // It returns the newly added Function. 
func (es FunctionSlice) AppendEmpty() Function { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigFunction()) + *es.orig = append(*es.orig, internal.NewFunction()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es FunctionSlice) RemoveIf(f func(Function) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigFunction((*es.orig)[i], true) + internal.DeleteFunction((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es FunctionSlice) CopyTo(dest FunctionSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigFunctionSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyFunctionPtrSlice(*dest.orig, *es.orig) } // Sort sorts the Function elements within FunctionSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunit.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunit.go index 052c844cd1d..15eac86a657 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunit.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunit.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -22,11 +21,11 @@ import ( // Must use NewKeyValueAndUnit function to create new instances. // Important: zero-initialized instance is not valid for use. type KeyValueAndUnit struct { - orig *otlpprofiles.KeyValueAndUnit + orig *internal.KeyValueAndUnit state *internal.State } -func newKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, state *internal.State) KeyValueAndUnit { +func newKeyValueAndUnit(orig *internal.KeyValueAndUnit, state *internal.State) KeyValueAndUnit { return KeyValueAndUnit{orig: orig, state: state} } @@ -35,7 +34,7 @@ func newKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, state *internal.Stat // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewKeyValueAndUnit() KeyValueAndUnit { - return newKeyValueAndUnit(internal.NewOrigKeyValueAndUnit(), internal.NewState()) + return newKeyValueAndUnit(internal.NewKeyValueAndUnit(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -47,7 +46,7 @@ func (ms KeyValueAndUnit) MoveTo(dest KeyValueAndUnit) { if ms.orig == dest.orig { return } - internal.DeleteOrigKeyValueAndUnit(dest.orig, false) + internal.DeleteKeyValueAndUnit(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -64,7 +63,7 @@ func (ms KeyValueAndUnit) SetKeyStrindex(v int32) { // Value returns the value associated with this KeyValueAndUnit. func (ms KeyValueAndUnit) Value() pcommon.Value { - return pcommon.Value(internal.NewValue(&ms.orig.Value, ms.state)) + return pcommon.Value(internal.NewValueWrapper(&ms.orig.Value, ms.state)) } // UnitStrindex returns the unitstrindex associated with this KeyValueAndUnit. @@ -81,5 +80,5 @@ func (ms KeyValueAndUnit) SetUnitStrindex(v int32) { // CopyTo copies all properties from the current struct overriding the destination. 
func (ms KeyValueAndUnit) CopyTo(dest KeyValueAndUnit) { dest.state.AssertMutable() - internal.CopyOrigKeyValueAndUnit(dest.orig, ms.orig) + internal.CopyKeyValueAndUnit(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunitslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunitslice.go index 4dfd60afe62..a068a3d317f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunitslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunitslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // KeyValueAndUnitSlice logically represents a slice of KeyValueAndUnit. @@ -22,18 +21,18 @@ import ( // Must use NewKeyValueAndUnitSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type KeyValueAndUnitSlice struct { - orig *[]*otlpprofiles.KeyValueAndUnit + orig *[]*internal.KeyValueAndUnit state *internal.State } -func newKeyValueAndUnitSlice(orig *[]*otlpprofiles.KeyValueAndUnit, state *internal.State) KeyValueAndUnitSlice { +func newKeyValueAndUnitSlice(orig *[]*internal.KeyValueAndUnit, state *internal.State) KeyValueAndUnitSlice { return KeyValueAndUnitSlice{orig: orig, state: state} } -// NewKeyValueAndUnitSlice creates a KeyValueAndUnitSlice with 0 elements. +// NewKeyValueAndUnitSlice creates a KeyValueAndUnitSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewKeyValueAndUnitSlice() KeyValueAndUnitSlice { - orig := []*otlpprofiles.KeyValueAndUnit(nil) + orig := []*internal.KeyValueAndUnit(nil) return newKeyValueAndUnitSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es KeyValueAndUnitSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.KeyValueAndUnit, len(*es.orig), newCap) + newOrig := make([]*internal.KeyValueAndUnit, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es KeyValueAndUnitSlice) EnsureCapacity(newCap int) { // It returns the newly added KeyValueAndUnit. 
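// The deleted Attribute type (inline string key plus value) is superseded by
// KeyValueAndUnit, whose key and unit are indices into the dictionary string
// table. A hedged sketch, assuming dict is a ProfilesDictionary and
// keyIdx/unitIdx are valid string-table indices:
//
//	kv := dict.AttributeTable().AppendEmpty()
//	kv.SetKeyStrindex(keyIdx)
//	kv.Value().SetStr("example")
//	kv.SetUnitStrindex(unitIdx)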
func (es KeyValueAndUnitSlice) AppendEmpty() KeyValueAndUnit { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigKeyValueAndUnit()) + *es.orig = append(*es.orig, internal.NewKeyValueAndUnit()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es KeyValueAndUnitSlice) RemoveIf(f func(KeyValueAndUnit) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigKeyValueAndUnit((*es.orig)[i], true) + internal.DeleteKeyValueAndUnit((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es KeyValueAndUnitSlice) CopyTo(dest KeyValueAndUnitSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigKeyValueAndUnitSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyKeyValueAndUnitPtrSlice(*dest.orig, *es.orig) } // Sort sorts the KeyValueAndUnit elements within KeyValueAndUnitSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go index 421cf1eb87d..724ce01ec23 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // Line details a specific line in a source code, linked to a function. @@ -19,11 +18,11 @@ import ( // Must use NewLine function to create new instances. // Important: zero-initialized instance is not valid for use. type Line struct { - orig *otlpprofiles.Line + orig *internal.Line state *internal.State } -func newLine(orig *otlpprofiles.Line, state *internal.State) Line { +func newLine(orig *internal.Line, state *internal.State) Line { return Line{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newLine(orig *otlpprofiles.Line, state *internal.State) Line { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewLine() Line { - return newLine(internal.NewOrigLine(), internal.NewState()) + return newLine(internal.NewLine(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms Line) MoveTo(dest Line) { if ms.orig == dest.orig { return } - internal.DeleteOrigLine(dest.orig, false) + internal.DeleteLine(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -84,5 +83,5 @@ func (ms Line) SetColumn(v int64) { // CopyTo copies all properties from the current struct overriding the destination. func (ms Line) CopyTo(dest Line) { dest.state.AssertMutable() - internal.CopyOrigLine(dest.orig, ms.orig) + internal.CopyLine(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go index 96051f11e58..77bc632bec9 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // LineSlice logically represents a slice of Line. 
@@ -22,18 +21,18 @@ import ( // Must use NewLineSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type LineSlice struct { - orig *[]*otlpprofiles.Line + orig *[]*internal.Line state *internal.State } -func newLineSlice(orig *[]*otlpprofiles.Line, state *internal.State) LineSlice { +func newLineSlice(orig *[]*internal.Line, state *internal.State) LineSlice { return LineSlice{orig: orig, state: state} } -// NewLineSlice creates a LineSlice with 0 elements. +// NewLineSlice creates a LineSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewLineSlice() LineSlice { - orig := []*otlpprofiles.Line(nil) + orig := []*internal.Line(nil) return newLineSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es LineSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.Line, len(*es.orig), newCap) + newOrig := make([]*internal.Line, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es LineSlice) EnsureCapacity(newCap int) { // It returns the newly added Line. func (es LineSlice) AppendEmpty() Line { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigLine()) + *es.orig = append(*es.orig, internal.NewLine()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es LineSlice) RemoveIf(f func(Line) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigLine((*es.orig)[i], true) + internal.DeleteLine((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es LineSlice) CopyTo(dest LineSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigLineSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyLinePtrSlice(*dest.orig, *es.orig) } // Sort sorts the Line elements within LineSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go index 8b2ae094fda..70f401edacd 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go @@ -8,8 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - "go.opentelemetry.io/collector/pdata/internal/data" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -21,11 +19,11 @@ import ( // Must use NewLink function to create new instances. // Important: zero-initialized instance is not valid for use. type Link struct { - orig *otlpprofiles.Link + orig *internal.Link state *internal.State } -func newLink(orig *otlpprofiles.Link, state *internal.State) Link { +func newLink(orig *internal.Link, state *internal.State) Link { return Link{orig: orig, state: state} } @@ -34,7 +32,7 @@ func newLink(orig *otlpprofiles.Link, state *internal.State) Link { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewLink() Link { - return newLink(internal.NewOrigLink(), internal.NewState()) + return newLink(internal.NewLink(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -46,7 +44,7 @@ func (ms Link) MoveTo(dest Link) { if ms.orig == dest.orig { return } - internal.DeleteOrigLink(dest.orig, false) + internal.DeleteLink(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -58,7 +56,7 @@ func (ms Link) TraceID() pcommon.TraceID { // SetTraceID replaces the traceid associated with this Link. func (ms Link) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() - ms.orig.TraceId = data.TraceID(v) + ms.orig.TraceId = internal.TraceID(v) } // SpanID returns the spanid associated with this Link. @@ -69,11 +67,11 @@ func (ms Link) SpanID() pcommon.SpanID { // SetSpanID replaces the spanid associated with this Link. func (ms Link) SetSpanID(v pcommon.SpanID) { ms.state.AssertMutable() - ms.orig.SpanId = data.SpanID(v) + ms.orig.SpanId = internal.SpanID(v) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Link) CopyTo(dest Link) { dest.state.AssertMutable() - internal.CopyOrigLink(dest.orig, ms.orig) + internal.CopyLink(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go index 1f20423da2d..7ebce965426 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // LinkSlice logically represents a slice of Link. @@ -22,18 +21,18 @@ import ( // Must use NewLinkSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type LinkSlice struct { - orig *[]*otlpprofiles.Link + orig *[]*internal.Link state *internal.State } -func newLinkSlice(orig *[]*otlpprofiles.Link, state *internal.State) LinkSlice { +func newLinkSlice(orig *[]*internal.Link, state *internal.State) LinkSlice { return LinkSlice{orig: orig, state: state} } -// NewLinkSlice creates a LinkSlice with 0 elements. +// NewLinkSlice creates a LinkSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewLinkSlice() LinkSlice { - orig := []*otlpprofiles.Link(nil) + orig := []*internal.Link(nil) return newLinkSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es LinkSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.Link, len(*es.orig), newCap) + newOrig := make([]*internal.Link, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es LinkSlice) EnsureCapacity(newCap int) { // It returns the newly added Link. 
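// Link's public API is unchanged by the switch from the data package to
// internal.TraceID/internal.SpanID. Illustrative usage, assuming dict is a
// ProfilesDictionary:
//
//	link := dict.LinkTable().AppendEmpty()
//	link.SetTraceID(pcommon.TraceID([16]byte{0x01}))
//	link.SetSpanID(pcommon.SpanID([0x01]byte{0x01}))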
func (es LinkSlice) AppendEmpty() Link { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigLink()) + *es.orig = append(*es.orig, internal.NewLink()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es LinkSlice) RemoveIf(f func(Link) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigLink((*es.orig)[i], true) + internal.DeleteLink((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es LinkSlice) CopyTo(dest LinkSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigLinkSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyLinkPtrSlice(*dest.orig, *es.orig) } // Sort sorts the Link elements within LinkSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go index a53c5800953..5679593264e 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewLocation function to create new instances. // Important: zero-initialized instance is not valid for use. type Location struct { - orig *otlpprofiles.Location + orig *internal.Location state *internal.State } -func newLocation(orig *otlpprofiles.Location, state *internal.State) Location { +func newLocation(orig *internal.Location, state *internal.State) Location { return Location{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newLocation(orig *otlpprofiles.Location, state *internal.State) Location { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewLocation() Location { - return newLocation(internal.NewOrigLocation(), internal.NewState()) + return newLocation(internal.NewLocation(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,7 +44,7 @@ func (ms Location) MoveTo(dest Location) { if ms.orig == dest.orig { return } - internal.DeleteOrigLocation(dest.orig, false) + internal.DeleteLocation(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -71,18 +70,18 @@ func (ms Location) SetAddress(v uint64) { ms.orig.Address = v } -// Line returns the Line associated with this Location. -func (ms Location) Line() LineSlice { - return newLineSlice(&ms.orig.Line, ms.state) +// Lines returns the Lines associated with this Location. +func (ms Location) Lines() LineSlice { + return newLineSlice(&ms.orig.Lines, ms.state) } // AttributeIndices returns the AttributeIndices associated with this Location. func (ms Location) AttributeIndices() pcommon.Int32Slice { - return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) + return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state)) } // CopyTo copies all properties from the current struct overriding the destination. 
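// Location.Line() is renamed to Location.Lines(), matching the renamed
// backing field. A hypothetical call-site update, assuming loc is a Location:
//
//	for _, ln := range loc.Lines().All() { // was: loc.Line().All()
//		_ = ln.FunctionIndex()
//	}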
func (ms Location) CopyTo(dest Location) { dest.state.AssertMutable() - internal.CopyOrigLocation(dest.orig, ms.orig) + internal.CopyLocation(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go index 2cba74b954c..9d68e247f21 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // LocationSlice logically represents a slice of Location. @@ -22,18 +21,18 @@ import ( // Must use NewLocationSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type LocationSlice struct { - orig *[]*otlpprofiles.Location + orig *[]*internal.Location state *internal.State } -func newLocationSlice(orig *[]*otlpprofiles.Location, state *internal.State) LocationSlice { +func newLocationSlice(orig *[]*internal.Location, state *internal.State) LocationSlice { return LocationSlice{orig: orig, state: state} } -// NewLocationSlice creates a LocationSlice with 0 elements. +// NewLocationSlice creates a LocationSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewLocationSlice() LocationSlice { - orig := []*otlpprofiles.Location(nil) + orig := []*internal.Location(nil) return newLocationSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es LocationSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.Location, len(*es.orig), newCap) + newOrig := make([]*internal.Location, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es LocationSlice) EnsureCapacity(newCap int) { // It returns the newly added Location. func (es LocationSlice) AppendEmpty() Location { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigLocation()) + *es.orig = append(*es.orig, internal.NewLocation()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es LocationSlice) RemoveIf(f func(Location) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigLocation((*es.orig)[i], true) + internal.DeleteLocation((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es LocationSlice) CopyTo(dest LocationSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigLocationSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyLocationPtrSlice(*dest.orig, *es.orig) } // Sort sorts the Location elements within LocationSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go index 24e1efe2930..8294acc5839 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewMapping function to create new instances. 
// Important: zero-initialized instance is not valid for use. type Mapping struct { - orig *otlpprofiles.Mapping + orig *internal.Mapping state *internal.State } -func newMapping(orig *otlpprofiles.Mapping, state *internal.State) Mapping { +func newMapping(orig *internal.Mapping, state *internal.State) Mapping { return Mapping{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newMapping(orig *otlpprofiles.Mapping, state *internal.State) Mapping { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewMapping() Mapping { - return newMapping(internal.NewOrigMapping(), internal.NewState()) + return newMapping(internal.NewMapping(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,7 +44,7 @@ func (ms Mapping) MoveTo(dest Mapping) { if ms.orig == dest.orig { return } - internal.DeleteOrigMapping(dest.orig, false) + internal.DeleteMapping(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -95,11 +94,11 @@ func (ms Mapping) SetFilenameStrindex(v int32) { // AttributeIndices returns the AttributeIndices associated with this Mapping. func (ms Mapping) AttributeIndices() pcommon.Int32Slice { - return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) + return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state)) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Mapping) CopyTo(dest Mapping) { dest.state.AssertMutable() - internal.CopyOrigMapping(dest.orig, ms.orig) + internal.CopyMapping(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go index 2dd3a8ffe5d..c4ed873727b 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // MappingSlice logically represents a slice of Mapping. @@ -22,18 +21,18 @@ import ( // Must use NewMappingSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type MappingSlice struct { - orig *[]*otlpprofiles.Mapping + orig *[]*internal.Mapping state *internal.State } -func newMappingSlice(orig *[]*otlpprofiles.Mapping, state *internal.State) MappingSlice { +func newMappingSlice(orig *[]*internal.Mapping, state *internal.State) MappingSlice { return MappingSlice{orig: orig, state: state} } -// NewMappingSlice creates a MappingSlice with 0 elements. +// NewMappingSlice creates a MappingSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. 
func NewMappingSlice() MappingSlice { - orig := []*otlpprofiles.Mapping(nil) + orig := []*internal.Mapping(nil) return newMappingSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es MappingSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.Mapping, len(*es.orig), newCap) + newOrig := make([]*internal.Mapping, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es MappingSlice) EnsureCapacity(newCap int) { // It returns the newly added Mapping. func (es MappingSlice) AppendEmpty() Mapping { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigMapping()) + *es.orig = append(*es.orig, internal.NewMapping()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es MappingSlice) RemoveIf(f func(Mapping) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigMapping((*es.orig)[i], true) + internal.DeleteMapping((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es MappingSlice) CopyTo(dest MappingSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigMappingSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyMappingPtrSlice(*dest.orig, *es.orig) } // Sort sorts the Mapping elements within MappingSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go index be0f6bf0b29..75faa9dbe43 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go @@ -8,8 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - "go.opentelemetry.io/collector/pdata/internal/data" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -21,11 +19,11 @@ import ( // Must use NewProfile function to create new instances. // Important: zero-initialized instance is not valid for use. type Profile struct { - orig *otlpprofiles.Profile + orig *internal.Profile state *internal.State } -func newProfile(orig *otlpprofiles.Profile, state *internal.State) Profile { +func newProfile(orig *internal.Profile, state *internal.State) Profile { return Profile{orig: orig, state: state} } @@ -34,7 +32,7 @@ func newProfile(orig *otlpprofiles.Profile, state *internal.State) Profile { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewProfile() Profile { - return newProfile(internal.NewOrigProfile(), internal.NewState()) + return newProfile(internal.NewProfile(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -46,7 +44,7 @@ func (ms Profile) MoveTo(dest Profile) { if ms.orig == dest.orig { return } - internal.DeleteOrigProfile(dest.orig, false) + internal.DeleteProfile(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -55,9 +53,9 @@ func (ms Profile) SampleType() ValueType { return newValueType(&ms.orig.SampleType, ms.state) } -// Sample returns the Sample associated with this Profile. -func (ms Profile) Sample() SampleSlice { - return newSampleSlice(&ms.orig.Sample, ms.state) +// Samples returns the Samples associated with this Profile. 
+func (ms Profile) Samples() SampleSlice { + return newSampleSlice(&ms.orig.Samples, ms.state) } // Time returns the time associated with this Profile. @@ -71,15 +69,15 @@ func (ms Profile) SetTime(v pcommon.Timestamp) { ms.orig.TimeUnixNano = uint64(v) } -// Duration returns the duration associated with this Profile. -func (ms Profile) Duration() pcommon.Timestamp { - return pcommon.Timestamp(ms.orig.DurationNano) +// DurationNano returns the durationnano associated with this Profile. +func (ms Profile) DurationNano() uint64 { + return ms.orig.DurationNano } -// SetDuration replaces the duration associated with this Profile. -func (ms Profile) SetDuration(v pcommon.Timestamp) { +// SetDurationNano replaces the durationnano associated with this Profile. +func (ms Profile) SetDurationNano(v uint64) { ms.state.AssertMutable() - ms.orig.DurationNano = uint64(v) + ms.orig.DurationNano = v } // PeriodType returns the periodtype associated with this Profile. @@ -98,11 +96,6 @@ func (ms Profile) SetPeriod(v int64) { ms.orig.Period = v } -// CommentStrindices returns the CommentStrindices associated with this Profile. -func (ms Profile) CommentStrindices() pcommon.Int32Slice { - return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.CommentStrindices, ms.state)) -} - // ProfileID returns the profileid associated with this Profile. func (ms Profile) ProfileID() ProfileID { return ProfileID(ms.orig.ProfileId) @@ -111,7 +104,7 @@ func (ms Profile) ProfileID() ProfileID { // SetProfileID replaces the profileid associated with this Profile. func (ms Profile) SetProfileID(v ProfileID) { ms.state.AssertMutable() - ms.orig.ProfileId = data.ProfileID(v) + ms.orig.ProfileId = internal.ProfileID(v) } // DroppedAttributesCount returns the droppedattributescount associated with this Profile. @@ -138,16 +131,16 @@ func (ms Profile) SetOriginalPayloadFormat(v string) { // OriginalPayload returns the OriginalPayload associated with this Profile. func (ms Profile) OriginalPayload() pcommon.ByteSlice { - return pcommon.ByteSlice(internal.NewByteSlice(&ms.orig.OriginalPayload, ms.state)) + return pcommon.ByteSlice(internal.NewByteSliceWrapper(&ms.orig.OriginalPayload, ms.state)) } // AttributeIndices returns the AttributeIndices associated with this Profile. func (ms Profile) AttributeIndices() pcommon.Int32Slice { - return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) + return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state)) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Profile) CopyTo(dest Profile) { dest.state.AssertMutable() - internal.CopyOrigProfile(dest.orig, ms.orig) + internal.CopyProfile(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profiles.go index bcf47932195..2dc85554293 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profiles.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" ) // Profiles is the top-level struct that is propagated through the profiles pipeline. @@ -19,10 +18,10 @@ import ( // // Must use NewProfiles function to create new instances. 
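// Two Profile accessors change shape in this file: Sample() becomes
// Samples(), and Duration()/SetDuration() (pcommon.Timestamp) become
// DurationNano()/SetDurationNano() (plain uint64); CommentStrindices() is
// removed. A hypothetical migration, assuming prof is a Profile and elapsed
// is a time.Duration:
//
//	prof.SetDurationNano(uint64(elapsed)) // was: prof.SetDuration(pcommon.Timestamp(elapsed))
//	sample := prof.Samples().AppendEmpty() // was: prof.Sample().AppendEmpty()
//	sample.SetStackIndex(0) // index into dict.StackTable(), assumed valid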
// Important: zero-initialized instance is not valid for use. -type Profiles internal.Profiles +type Profiles internal.ProfilesWrapper -func newProfiles(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, state *internal.State) Profiles { - return Profiles(internal.NewProfiles(orig, state)) +func newProfiles(orig *internal.ExportProfilesServiceRequest, state *internal.State) Profiles { + return Profiles(internal.NewProfilesWrapper(orig, state)) } // NewProfiles creates a new empty Profiles. @@ -30,7 +29,7 @@ func newProfiles(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, state // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewProfiles() Profiles { - return newProfiles(internal.NewOrigExportProfilesServiceRequest(), internal.NewState()) + return newProfiles(internal.NewExportProfilesServiceRequest(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -42,7 +41,7 @@ func (ms Profiles) MoveTo(dest Profiles) { if ms.getOrig() == dest.getOrig() { return } - internal.DeleteOrigExportProfilesServiceRequest(dest.getOrig(), false) + internal.DeleteExportProfilesServiceRequest(dest.getOrig(), false) *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig() } @@ -59,13 +58,13 @@ func (ms Profiles) Dictionary() ProfilesDictionary { // CopyTo copies all properties from the current struct overriding the destination. func (ms Profiles) CopyTo(dest Profiles) { dest.getState().AssertMutable() - internal.CopyOrigExportProfilesServiceRequest(dest.getOrig(), ms.getOrig()) + internal.CopyExportProfilesServiceRequest(dest.getOrig(), ms.getOrig()) } -func (ms Profiles) getOrig() *otlpcollectorprofiles.ExportProfilesServiceRequest { - return internal.GetOrigProfiles(internal.Profiles(ms)) +func (ms Profiles) getOrig() *internal.ExportProfilesServiceRequest { + return internal.GetProfilesOrig(internal.ProfilesWrapper(ms)) } func (ms Profiles) getState() *internal.State { - return internal.GetProfilesState(internal.Profiles(ms)) + return internal.GetProfilesState(internal.ProfilesWrapper(ms)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdata.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdata.go new file mode 100644 index 00000000000..dde7a05b6f4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdata.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewProfilesData function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type ProfilesData internal.ProfilesDataWrapper + +func newProfilesData(orig *internal.ProfilesData, state *internal.State) ProfilesData { + return ProfilesData(internal.NewProfilesDataWrapper(orig, state)) +} + +// NewProfilesData creates a new empty ProfilesData. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewProfilesData() ProfilesData { + return newProfilesData(internal.NewProfilesData(), internal.NewState()) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ProfilesData) MoveTo(dest ProfilesData) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + // If they point to the same data, they are the same, nothing to do. + if ms.getOrig() == dest.getOrig() { + return + } + internal.DeleteProfilesData(dest.getOrig(), false) + *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig() +} + +// ResourceProfiles returns the ResourceProfiles associated with this ProfilesData. +func (ms ProfilesData) ResourceProfiles() ResourceProfilesSlice { + return newResourceProfilesSlice(&ms.getOrig().ResourceProfiles, ms.getState()) +} + +// Dictionary returns the dictionary associated with this ProfilesData. +func (ms ProfilesData) Dictionary() ProfilesDictionary { + return newProfilesDictionary(&ms.getOrig().Dictionary, ms.getState()) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ProfilesData) CopyTo(dest ProfilesData) { + dest.getState().AssertMutable() + internal.CopyProfilesData(dest.getOrig(), ms.getOrig()) +} + +func (ms ProfilesData) getOrig() *internal.ProfilesData { + return internal.GetProfilesDataOrig(internal.ProfilesDataWrapper(ms)) +} + +func (ms ProfilesData) getState() *internal.State { + return internal.GetProfilesDataState(internal.ProfilesDataWrapper(ms)) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdictionary.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdictionary.go index fe1f650f65c..673da6d84ef 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdictionary.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdictionary.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewProfilesDictionary function to create new instances. // Important: zero-initialized instance is not valid for use. type ProfilesDictionary struct { - orig *otlpprofiles.ProfilesDictionary + orig *internal.ProfilesDictionary state *internal.State } -func newProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, state *internal.State) ProfilesDictionary { +func newProfilesDictionary(orig *internal.ProfilesDictionary, state *internal.State) ProfilesDictionary { return ProfilesDictionary{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, state *interna // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
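// ProfilesData is newly generated here: a top-level container mirroring
// Profiles, intended (per its doc comment) for persistent storage or for
// embedding in other protocols. A minimal sketch; the schema URL is
// illustrative only:
//
//	pd := pprofile.NewProfilesData()
//	pd.Dictionary().StringTable().Append("") // index 0 is conventionally the empty string
//	rp := pd.ResourceProfiles().AppendEmpty()
//	rp.SetSchemaUrl("https://opentelemetry.io/schemas/1.34.0")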
func NewProfilesDictionary() ProfilesDictionary { - return newProfilesDictionary(internal.NewOrigProfilesDictionary(), internal.NewState()) + return newProfilesDictionary(internal.NewProfilesDictionary(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,7 +44,7 @@ func (ms ProfilesDictionary) MoveTo(dest ProfilesDictionary) { if ms.orig == dest.orig { return } - internal.DeleteOrigProfilesDictionary(dest.orig, false) + internal.DeleteProfilesDictionary(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -71,7 +70,7 @@ func (ms ProfilesDictionary) LinkTable() LinkSlice { // StringTable returns the StringTable associated with this ProfilesDictionary. func (ms ProfilesDictionary) StringTable() pcommon.StringSlice { - return pcommon.StringSlice(internal.NewStringSlice(&ms.orig.StringTable, ms.state)) + return pcommon.StringSlice(internal.NewStringSliceWrapper(&ms.orig.StringTable, ms.state)) } // AttributeTable returns the AttributeTable associated with this ProfilesDictionary. @@ -87,5 +86,5 @@ func (ms ProfilesDictionary) StackTable() StackSlice { // CopyTo copies all properties from the current struct overriding the destination. func (ms ProfilesDictionary) CopyTo(dest ProfilesDictionary) { dest.state.AssertMutable() - internal.CopyOrigProfilesDictionary(dest.orig, ms.orig) + internal.CopyProfilesDictionary(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go index f1af93b8a7c..98c7e2fc234 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // ProfilesSlice logically represents a slice of Profile. @@ -22,18 +21,18 @@ import ( // Must use NewProfilesSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ProfilesSlice struct { - orig *[]*otlpprofiles.Profile + orig *[]*internal.Profile state *internal.State } -func newProfilesSlice(orig *[]*otlpprofiles.Profile, state *internal.State) ProfilesSlice { +func newProfilesSlice(orig *[]*internal.Profile, state *internal.State) ProfilesSlice { return ProfilesSlice{orig: orig, state: state} } -// NewProfilesSlice creates a ProfilesSlice with 0 elements. +// NewProfilesSlice creates a ProfilesSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewProfilesSlice() ProfilesSlice { - orig := []*otlpprofiles.Profile(nil) + orig := []*internal.Profile(nil) return newProfilesSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ProfilesSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.Profile, len(*es.orig), newCap) + newOrig := make([]*internal.Profile, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ProfilesSlice) EnsureCapacity(newCap int) { // It returns the newly added Profile. 
func (es ProfilesSlice) AppendEmpty() Profile { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigProfile()) + *es.orig = append(*es.orig, internal.NewProfile()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ProfilesSlice) RemoveIf(f func(Profile) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigProfile((*es.orig)[i], true) + internal.DeleteProfile((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ProfilesSlice) CopyTo(dest ProfilesSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigProfileSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyProfilePtrSlice(*dest.orig, *es.orig) } // Sort sorts the Profile elements within ProfilesSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go index 8762dfd00a7..cee70803fa6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewResourceProfiles function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceProfiles struct { - orig *otlpprofiles.ResourceProfiles + orig *internal.ResourceProfiles state *internal.State } -func newResourceProfiles(orig *otlpprofiles.ResourceProfiles, state *internal.State) ResourceProfiles { +func newResourceProfiles(orig *internal.ResourceProfiles, state *internal.State) ResourceProfiles { return ResourceProfiles{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newResourceProfiles(orig *otlpprofiles.ResourceProfiles, state *internal.St // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewResourceProfiles() ResourceProfiles { - return newResourceProfiles(internal.NewOrigResourceProfiles(), internal.NewState()) + return newResourceProfiles(internal.NewResourceProfiles(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms ResourceProfiles) MoveTo(dest ResourceProfiles) { if ms.orig == dest.orig { return } - internal.DeleteOrigResourceProfiles(dest.orig, false) + internal.DeleteResourceProfiles(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Resource returns the resource associated with this ResourceProfiles. func (ms ResourceProfiles) Resource() pcommon.Resource { - return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) + return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state)) } // ScopeProfiles returns the ScopeProfiles associated with this ResourceProfiles. @@ -73,5 +72,5 @@ func (ms ResourceProfiles) SetSchemaUrl(v string) { // CopyTo copies all properties from the current struct overriding the destination. 
func (ms ResourceProfiles) CopyTo(dest ResourceProfiles) { dest.state.AssertMutable() - internal.CopyOrigResourceProfiles(dest.orig, ms.orig) + internal.CopyResourceProfiles(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go index ee8f8236062..e1f4748cfcd 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // ResourceProfilesSlice logically represents a slice of ResourceProfiles. @@ -22,18 +21,18 @@ import ( // Must use NewResourceProfilesSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceProfilesSlice struct { - orig *[]*otlpprofiles.ResourceProfiles + orig *[]*internal.ResourceProfiles state *internal.State } -func newResourceProfilesSlice(orig *[]*otlpprofiles.ResourceProfiles, state *internal.State) ResourceProfilesSlice { +func newResourceProfilesSlice(orig *[]*internal.ResourceProfiles, state *internal.State) ResourceProfilesSlice { return ResourceProfilesSlice{orig: orig, state: state} } -// NewResourceProfilesSlice creates a ResourceProfilesSlice with 0 elements. +// NewResourceProfilesSlice creates a ResourceProfilesSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewResourceProfilesSlice() ResourceProfilesSlice { - orig := []*otlpprofiles.ResourceProfiles(nil) + orig := []*internal.ResourceProfiles(nil) return newResourceProfilesSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ResourceProfilesSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.ResourceProfiles, len(*es.orig), newCap) + newOrig := make([]*internal.ResourceProfiles, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ResourceProfilesSlice) EnsureCapacity(newCap int) { // It returns the newly added ResourceProfiles. 
func (es ResourceProfilesSlice) AppendEmpty() ResourceProfiles { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigResourceProfiles()) + *es.orig = append(*es.orig, internal.NewResourceProfiles()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ResourceProfilesSlice) RemoveIf(f func(ResourceProfiles) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigResourceProfiles((*es.orig)[i], true) + internal.DeleteResourceProfiles((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ResourceProfilesSlice) CopyTo(dest ResourceProfilesSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigResourceProfilesSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyResourceProfilesPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ResourceProfiles elements within ResourceProfilesSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go index 36ec3f773b0..c50d09d7f4d 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewSample function to create new instances. // Important: zero-initialized instance is not valid for use. type Sample struct { - orig *otlpprofiles.Sample + orig *internal.Sample state *internal.State } -func newSample(orig *otlpprofiles.Sample, state *internal.State) Sample { +func newSample(orig *internal.Sample, state *internal.State) Sample { return Sample{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newSample(orig *otlpprofiles.Sample, state *internal.State) Sample { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSample() Sample { - return newSample(internal.NewOrigSample(), internal.NewState()) + return newSample(internal.NewSample(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,7 +44,7 @@ func (ms Sample) MoveTo(dest Sample) { if ms.orig == dest.orig { return } - internal.DeleteOrigSample(dest.orig, false) + internal.DeleteSample(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -62,12 +61,12 @@ func (ms Sample) SetStackIndex(v int32) { // Values returns the Values associated with this Sample. func (ms Sample) Values() pcommon.Int64Slice { - return pcommon.Int64Slice(internal.NewInt64Slice(&ms.orig.Values, ms.state)) + return pcommon.Int64Slice(internal.NewInt64SliceWrapper(&ms.orig.Values, ms.state)) } // AttributeIndices returns the AttributeIndices associated with this Sample. func (ms Sample) AttributeIndices() pcommon.Int32Slice { - return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) + return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state)) } // LinkIndex returns the linkindex associated with this Sample. @@ -83,11 +82,11 @@ func (ms Sample) SetLinkIndex(v int32) { // TimestampsUnixNano returns the TimestampsUnixNano associated with this Sample. 
func (ms Sample) TimestampsUnixNano() pcommon.UInt64Slice { - return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.TimestampsUnixNano, ms.state)) + return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.TimestampsUnixNano, ms.state)) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Sample) CopyTo(dest Sample) { dest.state.AssertMutable() - internal.CopyOrigSample(dest.orig, ms.orig) + internal.CopySample(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go index e645899dbbf..a0ce987218f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // SampleSlice logically represents a slice of Sample. @@ -22,18 +21,18 @@ import ( // Must use NewSampleSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type SampleSlice struct { - orig *[]*otlpprofiles.Sample + orig *[]*internal.Sample state *internal.State } -func newSampleSlice(orig *[]*otlpprofiles.Sample, state *internal.State) SampleSlice { +func newSampleSlice(orig *[]*internal.Sample, state *internal.State) SampleSlice { return SampleSlice{orig: orig, state: state} } -// NewSampleSlice creates a SampleSlice with 0 elements. +// NewSampleSlice creates a SampleSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewSampleSlice() SampleSlice { - orig := []*otlpprofiles.Sample(nil) + orig := []*internal.Sample(nil) return newSampleSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es SampleSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.Sample, len(*es.orig), newCap) + newOrig := make([]*internal.Sample, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es SampleSlice) EnsureCapacity(newCap int) { // It returns the newly added Sample. 
func (es SampleSlice) AppendEmpty() Sample { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigSample()) + *es.orig = append(*es.orig, internal.NewSample()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es SampleSlice) RemoveIf(f func(Sample) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigSample((*es.orig)[i], true) + internal.DeleteSample((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es SampleSlice) CopyTo(dest SampleSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigSampleSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopySamplePtrSlice(*dest.orig, *es.orig) } // Sort sorts the Sample elements within SampleSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go index d51491d2582..01456ceccf4 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewScopeProfiles function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeProfiles struct { - orig *otlpprofiles.ScopeProfiles + orig *internal.ScopeProfiles state *internal.State } -func newScopeProfiles(orig *otlpprofiles.ScopeProfiles, state *internal.State) ScopeProfiles { +func newScopeProfiles(orig *internal.ScopeProfiles, state *internal.State) ScopeProfiles { return ScopeProfiles{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newScopeProfiles(orig *otlpprofiles.ScopeProfiles, state *internal.State) S // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewScopeProfiles() ScopeProfiles { - return newScopeProfiles(internal.NewOrigScopeProfiles(), internal.NewState()) + return newScopeProfiles(internal.NewScopeProfiles(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms ScopeProfiles) MoveTo(dest ScopeProfiles) { if ms.orig == dest.orig { return } - internal.DeleteOrigScopeProfiles(dest.orig, false) + internal.DeleteScopeProfiles(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Scope returns the scope associated with this ScopeProfiles. func (ms ScopeProfiles) Scope() pcommon.InstrumentationScope { - return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) + return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state)) } // Profiles returns the Profiles associated with this ScopeProfiles. @@ -73,5 +72,5 @@ func (ms ScopeProfiles) SetSchemaUrl(v string) { // CopyTo copies all properties from the current struct overriding the destination. 
func (ms ScopeProfiles) CopyTo(dest ScopeProfiles) { dest.state.AssertMutable() - internal.CopyOrigScopeProfiles(dest.orig, ms.orig) + internal.CopyScopeProfiles(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go index c14c8993874..5c438763add 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // ScopeProfilesSlice logically represents a slice of ScopeProfiles. @@ -22,18 +21,18 @@ import ( // Must use NewScopeProfilesSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeProfilesSlice struct { - orig *[]*otlpprofiles.ScopeProfiles + orig *[]*internal.ScopeProfiles state *internal.State } -func newScopeProfilesSlice(orig *[]*otlpprofiles.ScopeProfiles, state *internal.State) ScopeProfilesSlice { +func newScopeProfilesSlice(orig *[]*internal.ScopeProfiles, state *internal.State) ScopeProfilesSlice { return ScopeProfilesSlice{orig: orig, state: state} } -// NewScopeProfilesSlice creates a ScopeProfilesSlice with 0 elements. +// NewScopeProfilesSlice creates a ScopeProfilesSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewScopeProfilesSlice() ScopeProfilesSlice { - orig := []*otlpprofiles.ScopeProfiles(nil) + orig := []*internal.ScopeProfiles(nil) return newScopeProfilesSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ScopeProfilesSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.ScopeProfiles, len(*es.orig), newCap) + newOrig := make([]*internal.ScopeProfiles, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ScopeProfilesSlice) EnsureCapacity(newCap int) { // It returns the newly added ScopeProfiles. 
func (es ScopeProfilesSlice) AppendEmpty() ScopeProfiles { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigScopeProfiles()) + *es.orig = append(*es.orig, internal.NewScopeProfiles()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ScopeProfilesSlice) RemoveIf(f func(ScopeProfiles) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigScopeProfiles((*es.orig)[i], true) + internal.DeleteScopeProfiles((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ScopeProfilesSlice) CopyTo(dest ScopeProfilesSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigScopeProfilesSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyScopeProfilesPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ScopeProfiles elements within ScopeProfilesSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stack.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stack.go index 025571b1282..53e4352c54f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stack.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stack.go @@ -8,7 +8,6 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewStack function to create new instances. // Important: zero-initialized instance is not valid for use. type Stack struct { - orig *otlpprofiles.Stack + orig *internal.Stack state *internal.State } -func newStack(orig *otlpprofiles.Stack, state *internal.State) Stack { +func newStack(orig *internal.Stack, state *internal.State) Stack { return Stack{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newStack(orig *otlpprofiles.Stack, state *internal.State) Stack { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewStack() Stack { - return newStack(internal.NewOrigStack(), internal.NewState()) + return newStack(internal.NewStack(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,17 +44,17 @@ func (ms Stack) MoveTo(dest Stack) { if ms.orig == dest.orig { return } - internal.DeleteOrigStack(dest.orig, false) + internal.DeleteStack(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // LocationIndices returns the LocationIndices associated with this Stack. func (ms Stack) LocationIndices() pcommon.Int32Slice { - return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.LocationIndices, ms.state)) + return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.LocationIndices, ms.state)) } // CopyTo copies all properties from the current struct overriding the destination. 
func (ms Stack) CopyTo(dest Stack) { dest.state.AssertMutable() - internal.CopyOrigStack(dest.orig, ms.orig) + internal.CopyStack(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stackslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stackslice.go index 41ecf5b7125..4df19fb68d8 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stackslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stackslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // StackSlice logically represents a slice of Stack. @@ -22,18 +21,18 @@ import ( // Must use NewStackSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type StackSlice struct { - orig *[]*otlpprofiles.Stack + orig *[]*internal.Stack state *internal.State } -func newStackSlice(orig *[]*otlpprofiles.Stack, state *internal.State) StackSlice { +func newStackSlice(orig *[]*internal.Stack, state *internal.State) StackSlice { return StackSlice{orig: orig, state: state} } -// NewStackSlice creates a StackSlice with 0 elements. +// NewStackSlice creates a StackSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewStackSlice() StackSlice { - orig := []*otlpprofiles.Stack(nil) + orig := []*internal.Stack(nil) return newStackSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es StackSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.Stack, len(*es.orig), newCap) + newOrig := make([]*internal.Stack, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es StackSlice) EnsureCapacity(newCap int) { // It returns the newly added Stack. func (es StackSlice) AppendEmpty() Stack { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigStack()) + *es.orig = append(*es.orig, internal.NewStack()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es StackSlice) RemoveIf(f func(Stack) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigStack((*es.orig)[i], true) + internal.DeleteStack((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es StackSlice) CopyTo(dest StackSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigStackSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyStackPtrSlice(*dest.orig, *es.orig) } // Sort sorts the Stack elements within StackSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go index 54be666b0c9..b994c95052b 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go @@ -8,10 +8,9 @@ package pprofile import ( "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) -// ValueType describes the type and units of a value, with an optional aggregation temporality. +// ValueType describes the type and units of a value. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. 
@@ -19,11 +18,11 @@ import ( // Must use NewValueType function to create new instances. // Important: zero-initialized instance is not valid for use. type ValueType struct { - orig *otlpprofiles.ValueType + orig *internal.ValueType state *internal.State } -func newValueType(orig *otlpprofiles.ValueType, state *internal.State) ValueType { +func newValueType(orig *internal.ValueType, state *internal.State) ValueType { return ValueType{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newValueType(orig *otlpprofiles.ValueType, state *internal.State) ValueType // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewValueType() ValueType { - return newValueType(internal.NewOrigValueType(), internal.NewState()) + return newValueType(internal.NewValueType(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ValueType) MoveTo(dest ValueType) { if ms.orig == dest.orig { return } - internal.DeleteOrigValueType(dest.orig, false) + internal.DeleteValueType(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -70,19 +69,8 @@ func (ms ValueType) SetUnitStrindex(v int32) { ms.orig.UnitStrindex = v } -// AggregationTemporality returns the aggregationtemporality associated with this ValueType. -func (ms ValueType) AggregationTemporality() AggregationTemporality { - return AggregationTemporality(ms.orig.AggregationTemporality) -} - -// SetAggregationTemporality replaces the aggregationtemporality associated with this ValueType. -func (ms ValueType) SetAggregationTemporality(v AggregationTemporality) { - ms.state.AssertMutable() - ms.orig.AggregationTemporality = otlpprofiles.AggregationTemporality(v) -} - // CopyTo copies all properties from the current struct overriding the destination. func (ms ValueType) CopyTo(dest ValueType) { dest.state.AssertMutable() - internal.CopyOrigValueType(dest.orig, ms.orig) + internal.CopyValueType(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go index 16bda9d57c5..aa53a9707e2 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // ValueTypeSlice logically represents a slice of ValueType. @@ -22,18 +21,18 @@ import ( // Must use NewValueTypeSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ValueTypeSlice struct { - orig *[]*otlpprofiles.ValueType + orig *[]*internal.ValueType state *internal.State } -func newValueTypeSlice(orig *[]*otlpprofiles.ValueType, state *internal.State) ValueTypeSlice { +func newValueTypeSlice(orig *[]*internal.ValueType, state *internal.State) ValueTypeSlice { return ValueTypeSlice{orig: orig, state: state} } -// NewValueTypeSlice creates a ValueTypeSlice with 0 elements. +// NewValueTypeSlice creates a ValueTypeSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. 
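Note: the hunk above removes the AggregationTemporality accessors from ValueType outright rather than deprecating them; a ValueType now carries only its type and unit string indices. A hedged before/after sketch of a caller (typeIdx and unitIdx are placeholder dictionary indices):

    vt := pprofile.NewValueType()
    vt.SetTypeStrindex(typeIdx) // index into the dictionary's string table
    vt.SetUnitStrindex(unitIdx)
    // before this patch a caller could also do, e.g.:
    //   vt.SetAggregationTemporality(pprofile.AggregationTemporalityCumulative)
    // such lines no longer compile and must simply be dropped.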
func NewValueTypeSlice() ValueTypeSlice { - orig := []*otlpprofiles.ValueType(nil) + orig := []*internal.ValueType(nil) return newValueTypeSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ValueTypeSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlpprofiles.ValueType, len(*es.orig), newCap) + newOrig := make([]*internal.ValueType, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ValueTypeSlice) EnsureCapacity(newCap int) { // It returns the newly added ValueType. func (es ValueTypeSlice) AppendEmpty() ValueType { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigValueType()) + *es.orig = append(*es.orig, internal.NewValueType()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ValueTypeSlice) RemoveIf(f func(ValueType) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigValueType((*es.orig)[i], true) + internal.DeleteValueType((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ValueTypeSlice) CopyTo(dest ValueTypeSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigValueTypeSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyValueTypePtrSlice(*dest.orig, *es.orig) } // Sort sorts the ValueType elements within ValueTypeSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go index 25ed17bfd90..8b3fa3fac85 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go @@ -6,7 +6,6 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" import ( "slices" - "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" ) @@ -18,7 +17,7 @@ type JSONMarshaler struct{} func (*JSONMarshaler) MarshalProfiles(pd Profiles) ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportProfilesServiceRequest(pd.getOrig(), dest) + pd.getOrig().MarshalJSON(dest) if dest.Error() != nil { return nil, dest.Error() } @@ -33,7 +32,7 @@ func (*JSONUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) { iter := json.BorrowIterator(buf) defer json.ReturnIterator(iter) pd := NewProfiles() - internal.UnmarshalJSONOrigExportProfilesServiceRequest(pd.getOrig(), iter) + pd.getOrig().UnmarshalJSON(iter) if iter.Error() != nil { return Profiles{}, iter.Error() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/keyvalueandunit.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/keyvalueandunit.go index 3405074ba36..47bf4d151db 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/keyvalueandunit.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/keyvalueandunit.go @@ -3,6 +3,8 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" +import "fmt" + // Equal checks equality with another KeyValueAndUnit // It assumes both structs refer to the same dictionary. func (ms KeyValueAndUnit) Equal(val KeyValueAndUnit) bool { @@ -10,3 +12,33 @@ func (ms KeyValueAndUnit) Equal(val KeyValueAndUnit) bool { ms.UnitStrindex() == val.UnitStrindex() && ms.Value().Equal(val.Value()) } + +// switchDictionary updates the KeyValueAndUnit, switching its indices from one +// dictionary to another. 
+func (ms KeyValueAndUnit) switchDictionary(src, dst ProfilesDictionary) error { + if ms.KeyStrindex() > 0 { + if src.StringTable().Len() <= int(ms.KeyStrindex()) { + return fmt.Errorf("invalid key index %d", ms.KeyStrindex()) + } + + idx, err := SetString(dst.StringTable(), src.StringTable().At(int(ms.KeyStrindex()))) + if err != nil { + return fmt.Errorf("couldn't set key: %w", err) + } + ms.SetKeyStrindex(idx) + } + + if ms.UnitStrindex() > 0 { + if src.StringTable().Len() <= int(ms.UnitStrindex()) { + return fmt.Errorf("invalid unit index %d", ms.UnitStrindex()) + } + + idx, err := SetString(dst.StringTable(), src.StringTable().At(int(ms.UnitStrindex()))) + if err != nil { + return fmt.Errorf("couldn't set unit: %w", err) + } + ms.SetUnitStrindex(idx) + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/line.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/line.go index ae97b8f839e..7ed3461dddc 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/line.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/line.go @@ -3,6 +3,8 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" +import "fmt" + // Equal checks equality with another LineSlice func (l LineSlice) Equal(val LineSlice) bool { if l.Len() != val.Len() { @@ -24,3 +26,27 @@ func (l Line) Equal(val Line) bool { l.FunctionIndex() == val.FunctionIndex() && l.Line() == val.Line() } + +// switchDictionary updates the Line, switching its indices from one +// dictionary to another. +func (l Line) switchDictionary(src, dst ProfilesDictionary) error { + if l.FunctionIndex() > 0 { + if src.FunctionTable().Len() <= int(l.FunctionIndex()) { + return fmt.Errorf("invalid function index %d", l.FunctionIndex()) + } + + fn := src.FunctionTable().At(int(l.FunctionIndex())) + err := fn.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch function dictionary: %w", err) + } + + idx, err := SetFunction(dst.FunctionTable(), fn) + if err != nil { + return fmt.Errorf("couldn't set function: %w", err) + } + l.SetFunctionIndex(idx) + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/links.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/links.go index b548a2f603c..c1cc2d5071c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/links.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/links.go @@ -5,43 +5,27 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" import ( "errors" - "fmt" "math" ) var errTooManyLinkTableEntries = errors.New("too many entries in LinkTable") -// SetLink updates a LinkTable and a Sample's LinkIndex to -// add or update a link. -func SetLink(table LinkSlice, record Sample, li Link) error { - idx := int(record.LinkIndex()) - if idx > 0 { - if idx >= table.Len() { - return fmt.Errorf("index value %d out of range for LinkIndex", idx) - } - mapAt := table.At(idx) - if mapAt.Equal(li) { - // Link already exists, nothing to do. - return nil - } - } - +// SetLink updates a LinkTable, adding or providing a value and returns its +// index. +func SetLink(table LinkSlice, li Link) (int32, error) { for j, l := range table.All() { if l.Equal(li) { if j > math.MaxInt32 { - return errTooManyLinkTableEntries + return 0, errTooManyLinkTableEntries } - // Add the index of the existing link to the indices. 
- record.SetLinkIndex(int32(j)) //nolint:gosec // G115 overflow checked - return nil + return int32(j), nil } } if table.Len() >= math.MaxInt32 { - return errTooManyLinkTableEntries + return 0, errTooManyLinkTableEntries } li.CopyTo(table.AppendEmpty()) - record.SetLinkIndex(int32(table.Len() - 1)) //nolint:gosec // G115 overflow checked - return nil + return int32(table.Len() - 1), nil } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/location.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/location.go index 1e7099928da..a6963b3c9dc 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/location.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/location.go @@ -3,10 +3,59 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" +import "fmt" + // Equal checks equality with another Location func (ms Location) Equal(val Location) bool { return ms.MappingIndex() == val.MappingIndex() && ms.Address() == val.Address() && ms.AttributeIndices().Equal(val.AttributeIndices()) && - ms.Line().Equal(val.Line()) + ms.Lines().Equal(val.Lines()) +} + +// switchDictionary updates the Location, switching its indices from one +// dictionary to another. +func (ms Location) switchDictionary(src, dst ProfilesDictionary) error { + if ms.MappingIndex() > 0 { + if src.MappingTable().Len() <= int(ms.MappingIndex()) { + return fmt.Errorf("invalid mapping index %d", ms.MappingIndex()) + } + + mapping := src.MappingTable().At(int(ms.MappingIndex())) + err := mapping.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch dictionary for mapping: %w", err) + } + idx, err := SetMapping(dst.MappingTable(), mapping) + if err != nil { + return fmt.Errorf("couldn't set mapping: %w", err) + } + ms.SetMappingIndex(idx) + } + + for i, v := range ms.AttributeIndices().All() { + if src.AttributeTable().Len() <= int(v) { + return fmt.Errorf("invalid attribute index %d", v) + } + + attr := src.AttributeTable().At(int(v)) + err := attr.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch dictionary for attribute %d: %w", i, err) + } + idx, err := SetAttribute(dst.AttributeTable(), attr) + if err != nil { + return fmt.Errorf("couldn't set attribute %d: %w", i, err) + } + ms.AttributeIndices().SetAt(i, idx) + } + + for i, v := range ms.Lines().All() { + err := v.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch dictionary for line %d: %w", i, err) + } + } + + return nil } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/locations.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/locations.go index 2af5f13b329..12b3cbe1be7 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/locations.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/locations.go @@ -5,7 +5,6 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" import ( "errors" - "fmt" "math" ) @@ -23,39 +22,7 @@ func FromLocationIndices(table LocationSlice, record Stack) LocationSlice { return m } -var ( - errTooManyLocationTableEntries = errors.New("too many entries in LocationTable") - errTooManyLocationIndicesEntries = errors.New("too many entries in LocationIndices") -) - -// PutLocation updates a LocationTable and a Stack's LocationIndices to -// add or update a location. -// -// Deprecated: [v0.138.0] use SetLocation instead. 
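Note: SetLink above no longer takes the Sample and writes its LinkIndex as a side effect; it now just interns the Link in the table and returns its index, leaving the caller to store it. A hedged migration sketch (table, link, and sample are placeholders):

    // before: err := pprofile.SetLink(table, sample, link)
    idx, err := pprofile.SetLink(table, link)
    if err != nil {
        return err
    }
    sample.SetLinkIndex(idx)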
-func PutLocation(table LocationSlice, record Stack, loc Location) error { - for i, locIdx := range record.LocationIndices().All() { - idx := int(locIdx) - if idx < 0 || idx >= table.Len() { - return fmt.Errorf("index value %d out of range in LocationIndices[%d]", idx, i) - } - locAt := table.At(idx) - if locAt.Equal(loc) { - // Location already exists, nothing to do. - return nil - } - } - - if record.LocationIndices().Len() >= math.MaxInt32 { - return errTooManyLocationIndicesEntries - } - - id, err := SetLocation(table, loc) - if err != nil { - return err - } - record.LocationIndices().Append(id) - return nil -} +var errTooManyLocationTableEntries = errors.New("too many entries in LocationTable") // SetLocation updates a LocationTable, adding or providing a value and returns // its index. @@ -65,7 +32,7 @@ func SetLocation(table LocationSlice, loc Location) (int32, error) { if j > math.MaxInt32 { return 0, errTooManyLocationTableEntries } - return int32(j), nil //nolint:gosec // G115 overflow checked + return int32(j), nil } } @@ -74,5 +41,5 @@ func SetLocation(table LocationSlice, loc Location) (int32, error) { } loc.CopyTo(table.AppendEmpty()) - return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked + return int32(table.Len() - 1), nil } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/mapping.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/mapping.go index 238a54e3f2b..2b857fb2344 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/mapping.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/mapping.go @@ -3,6 +3,8 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" +import "fmt" + // Equal checks equality with another Mapping func (ms Mapping) Equal(val Mapping) bool { return ms.MemoryStart() == val.MemoryStart() && @@ -11,3 +13,38 @@ func (ms Mapping) Equal(val Mapping) bool { ms.FilenameStrindex() == val.FilenameStrindex() && ms.AttributeIndices().Equal(val.AttributeIndices()) } + +// switchDictionary updates the Mapping, switching its indices from one +// dictionary to another. 
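Note: the deprecated PutLocation is deleted here. Callers that relied on it appending to a Stack's LocationIndices can compose the same behaviour from the surviving SetLocation, roughly as follows (a sketch, with dict, stack, and loc as placeholders):

    idx, err := pprofile.SetLocation(dict.LocationTable(), loc)
    if err != nil {
        return err
    }
    stack.LocationIndices().Append(idx) // what PutLocation used to do internally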
+func (ms Mapping) switchDictionary(src, dst ProfilesDictionary) error { + if ms.FilenameStrindex() > 0 { + if src.StringTable().Len() <= int(ms.FilenameStrindex()) { + return fmt.Errorf("invalid filename index %d", ms.FilenameStrindex()) + } + + idx, err := SetString(dst.StringTable(), src.StringTable().At(int(ms.FilenameStrindex()))) + if err != nil { + return fmt.Errorf("couldn't set filename: %w", err) + } + ms.SetFilenameStrindex(idx) + } + + for i, v := range ms.AttributeIndices().All() { + if src.AttributeTable().Len() <= int(v) { + return fmt.Errorf("invalid attribute index %d", v) + } + + attr := src.AttributeTable().At(int(v)) + err := attr.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch dictionary for attribute %d: %w", i, err) + } + idx, err := SetAttribute(dst.AttributeTable(), attr) + if err != nil { + return fmt.Errorf("couldn't set attribute %d: %w", i, err) + } + ms.AttributeIndices().SetAt(i, idx) + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/mappings.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/mappings.go index fe547e915ac..39943bf8ea0 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/mappings.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/mappings.go @@ -5,43 +5,27 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" import ( "errors" - "fmt" "math" ) var errTooManyMappingTableEntries = errors.New("too many entries in MappingTable") -// SetMapping updates a MappingTable and a Location's MappingIndex to -// add or update a mapping. -func SetMapping(table MappingSlice, record Location, ma Mapping) error { - idx := int(record.MappingIndex()) - if idx > 0 { - if idx >= table.Len() { - return fmt.Errorf("index value %d out of range for MappingIndex", idx) - } - mapAt := table.At(idx) - if mapAt.Equal(ma) { - // Mapping already exists, nothing to do. - return nil - } - } - +// SetMapping updates a MappingTable, adding or providing a value and returns +// its index. +func SetMapping(table MappingSlice, ma Mapping) (int32, error) { for j, m := range table.All() { if m.Equal(ma) { if j > math.MaxInt32 { - return errTooManyMappingTableEntries + return 0, errTooManyMappingTableEntries } - // Add the index of the existing mapping to the indices. 
- record.SetMappingIndex(int32(j)) //nolint:gosec // G115 overflow checked - return nil + return int32(j), nil } } if table.Len() >= math.MaxInt32 { - return errTooManyMappingTableEntries + return 0, errTooManyMappingTableEntries } ma.CopyTo(table.AppendEmpty()) - record.SetMappingIndex(int32(table.Len() - 1)) //nolint:gosec // G115 overflow checked - return nil + return int32(table.Len() - 1), nil } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go index 66642501e65..58f4f179474 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go @@ -3,42 +3,38 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" -import ( - "go.opentelemetry.io/collector/pdata/internal" -) - var _ MarshalSizer = (*ProtoMarshaler)(nil) type ProtoMarshaler struct{} func (e *ProtoMarshaler) MarshalProfiles(pd Profiles) ([]byte, error) { - size := internal.SizeProtoOrigExportProfilesServiceRequest(pd.getOrig()) + size := pd.getOrig().SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportProfilesServiceRequest(pd.getOrig(), buf) + _ = pd.getOrig().MarshalProto(buf) return buf, nil } func (e *ProtoMarshaler) ProfilesSize(pd Profiles) int { - return internal.SizeProtoOrigExportProfilesServiceRequest(pd.getOrig()) + return pd.getOrig().SizeProto() } func (e *ProtoMarshaler) ResourceProfilesSize(pd ResourceProfiles) int { - return internal.SizeProtoOrigResourceProfiles(pd.orig) + return pd.orig.SizeProto() } func (e *ProtoMarshaler) ScopeProfilesSize(pd ScopeProfiles) int { - return internal.SizeProtoOrigScopeProfiles(pd.orig) + return pd.orig.SizeProto() } func (e *ProtoMarshaler) ProfileSize(pd Profile) int { - return internal.SizeProtoOrigProfile(pd.orig) + return pd.orig.SizeProto() } type ProtoUnmarshaler struct{} func (d *ProtoUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) { pd := NewProfiles() - err := internal.UnmarshalProtoOrigExportProfilesServiceRequest(pd.getOrig(), buf) + err := pd.getOrig().UnmarshalProto(buf) if err != nil { return Profiles{}, err } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go index 3c3c90ed963..7927c7f4841 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go @@ -8,7 +8,6 @@ package pprofileotlp import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" ) // ExportPartialSuccess represents the details of a partially successful export request. @@ -19,11 +18,11 @@ import ( // Must use NewExportPartialSuccess function to create new instances. // Important: zero-initialized instance is not valid for use. 
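Note: SetMapping gets the same treatment as SetLink, so the Location parameter and the MappingIndex side effect are gone and the interned index is returned instead. A hedged migration sketch (table, loc, and mapping are placeholders):

    // before: err := pprofile.SetMapping(table, loc, mapping)
    idx, err := pprofile.SetMapping(table, mapping)
    if err != nil {
        return err
    }
    loc.SetMappingIndex(idx)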
type ExportPartialSuccess struct { - orig *otlpcollectorprofiles.ExportProfilesPartialSuccess + orig *internal.ExportProfilesPartialSuccess state *internal.State } -func newExportPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, state *internal.State) ExportPartialSuccess { +func newExportPartialSuccess(orig *internal.ExportProfilesPartialSuccess, state *internal.State) ExportPartialSuccess { return ExportPartialSuccess{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newExportPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSu // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportPartialSuccess() ExportPartialSuccess { - return newExportPartialSuccess(internal.NewOrigExportProfilesPartialSuccess(), internal.NewState()) + return newExportPartialSuccess(internal.NewExportProfilesPartialSuccess(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { if ms.orig == dest.orig { return } - internal.DeleteOrigExportProfilesPartialSuccess(dest.orig, false) + internal.DeleteExportProfilesPartialSuccess(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -73,5 +72,5 @@ func (ms ExportPartialSuccess) SetErrorMessage(v string) { // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { dest.state.AssertMutable() - internal.CopyOrigExportProfilesPartialSuccess(dest.orig, ms.orig) + internal.CopyExportProfilesPartialSuccess(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportresponse.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportresponse.go index 836104f0def..bfd8e7105b9 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportresponse.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportresponse.go @@ -8,7 +8,6 @@ package pprofileotlp import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" ) // ExportResponse represents the response for gRPC/HTTP client/server. @@ -19,11 +18,11 @@ import ( // Must use NewExportResponse function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportResponse struct { - orig *otlpcollectorprofiles.ExportProfilesServiceResponse + orig *internal.ExportProfilesServiceResponse state *internal.State } -func newExportResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, state *internal.State) ExportResponse { +func newExportResponse(orig *internal.ExportProfilesServiceResponse, state *internal.State) ExportResponse { return ExportResponse{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newExportResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
func NewExportResponse() ExportResponse { - return newExportResponse(internal.NewOrigExportProfilesServiceResponse(), internal.NewState()) + return newExportResponse(internal.NewExportProfilesServiceResponse(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ExportResponse) MoveTo(dest ExportResponse) { if ms.orig == dest.orig { return } - internal.DeleteOrigExportProfilesServiceResponse(dest.orig, false) + internal.DeleteExportProfilesServiceResponse(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -56,5 +55,5 @@ func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportResponse) CopyTo(dest ExportResponse) { dest.state.AssertMutable() - internal.CopyOrigExportProfilesServiceResponse(dest.orig, ms.orig) + internal.CopyExportProfilesServiceResponse(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go index 819df8f6854..fe50966e983 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go @@ -11,8 +11,7 @@ import ( "google.golang.org/grpc/status" "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" - _ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // enforces custom gRPC encoding to be loaded. + "go.opentelemetry.io/collector/pdata/internal/otelgrpc" "go.opentelemetry.io/collector/pdata/internal/otlp" ) @@ -32,11 +31,11 @@ type GRPCClient interface { // NewGRPCClient returns a new GRPCClient connected using the given connection. func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { - return &grpcClient{rawClient: otlpcollectorprofile.NewProfilesServiceClient(cc)} + return &grpcClient{rawClient: otelgrpc.NewProfilesServiceClient(cc)} } type grpcClient struct { - rawClient otlpcollectorprofile.ProfilesServiceClient + rawClient otelgrpc.ProfilesServiceClient } // Export implements the Client interface. @@ -76,14 +75,14 @@ func (*UnimplementedGRPCServer) unexported() {} // RegisterGRPCServer registers the GRPCServer to the grpc.Server. 
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { - otlpcollectorprofile.RegisterProfilesServiceServer(s, &rawProfilesServer{srv: srv}) + otelgrpc.RegisterProfilesServiceServer(s, &rawProfilesServer{srv: srv}) } type rawProfilesServer struct { srv GRPCServer } -func (s rawProfilesServer) Export(ctx context.Context, request *otlpcollectorprofile.ExportProfilesServiceRequest) (*otlpcollectorprofile.ExportProfilesServiceResponse, error) { +func (s rawProfilesServer) Export(ctx context.Context, request *internal.ExportProfilesServiceRequest) (*internal.ExportProfilesServiceResponse, error) { otlp.MigrateProfiles(request.ResourceProfiles) rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()}) return rsp.orig, err diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go index 3821cc72aab..0e5359941f5 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go @@ -7,7 +7,6 @@ import ( "slices" "go.opentelemetry.io/collector/pdata/internal" - otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" "go.opentelemetry.io/collector/pdata/pprofile" @@ -16,14 +15,14 @@ import ( // ExportRequest represents the request for gRPC/HTTP client/server. // It's a wrapper for pprofile.Profiles data. type ExportRequest struct { - orig *otlpcollectorprofile.ExportProfilesServiceRequest + orig *internal.ExportProfilesServiceRequest state *internal.State } // NewExportRequest returns an empty ExportRequest. func NewExportRequest() ExportRequest { return ExportRequest{ - orig: &otlpcollectorprofile.ExportProfilesServiceRequest{}, + orig: &internal.ExportProfilesServiceRequest{}, state: internal.NewState(), } } @@ -33,22 +32,22 @@ func NewExportRequest() ExportRequest { // any changes to the provided Profiles struct will be reflected in the ExportRequest and vice versa. func NewExportRequestFromProfiles(td pprofile.Profiles) ExportRequest { return ExportRequest{ - orig: internal.GetOrigProfiles(internal.Profiles(td)), - state: internal.GetProfilesState(internal.Profiles(td)), + orig: internal.GetProfilesOrig(internal.ProfilesWrapper(td)), + state: internal.GetProfilesState(internal.ProfilesWrapper(td)), } } // MarshalProto marshals ExportRequest into proto bytes. func (ms ExportRequest) MarshalProto() ([]byte, error) { - size := internal.SizeProtoOrigExportProfilesServiceRequest(ms.orig) + size := ms.orig.SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportProfilesServiceRequest(ms.orig, buf) + _ = ms.orig.MarshalProto(buf) return buf, nil } // UnmarshalProto unmarshalls ExportRequest from proto bytes. 
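Note: the gRPC plumbing now routes through the internal otelgrpc package, but the exported surface (GRPCServer, RegisterGRPCServer, ExportRequest/ExportResponse) is unchanged. A minimal server sketch against that surface, not part of the patch, assuming the usual google.golang.org/grpc setup:

    type profilesServer struct {
        pprofileotlp.UnimplementedGRPCServer
    }

    func (*profilesServer) Export(ctx context.Context, req pprofileotlp.ExportRequest) (pprofileotlp.ExportResponse, error) {
        _ = req.Profiles() // inspect or forward the payload here
        return pprofileotlp.NewExportResponse(), nil
    }

    // wiring, inside server setup:
    s := grpc.NewServer()
    pprofileotlp.RegisterGRPCServer(s, &profilesServer{})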
func (ms ExportRequest) UnmarshalProto(data []byte) error { - err := internal.UnmarshalProtoOrigExportProfilesServiceRequest(ms.orig, data) + err := ms.orig.UnmarshalProto(data) if err != nil { return err } @@ -60,7 +59,7 @@ func (ms ExportRequest) UnmarshalProto(data []byte) error { func (ms ExportRequest) MarshalJSON() ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportProfilesServiceRequest(ms.orig, dest) + ms.orig.MarshalJSON(dest) if dest.Error() != nil { return nil, dest.Error() } @@ -71,10 +70,10 @@ func (ms ExportRequest) MarshalJSON() ([]byte, error) { func (ms ExportRequest) UnmarshalJSON(data []byte) error { iter := json.BorrowIterator(data) defer json.ReturnIterator(iter) - internal.UnmarshalJSONOrigExportProfilesServiceRequest(ms.orig, iter) + ms.orig.UnmarshalJSON(iter) return iter.Error() } func (ms ExportRequest) Profiles() pprofile.Profiles { - return pprofile.Profiles(internal.NewProfiles(ms.orig, ms.state)) + return pprofile.Profiles(internal.NewProfilesWrapper(ms.orig, ms.state)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go index 2b88ef01c2f..4bd64cecd7c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go @@ -6,28 +6,27 @@ package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/ppr import ( "slices" - "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/json" ) // MarshalProto marshals ExportResponse into proto bytes. func (ms ExportResponse) MarshalProto() ([]byte, error) { - size := internal.SizeProtoOrigExportProfilesServiceResponse(ms.orig) + size := ms.orig.SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportProfilesServiceResponse(ms.orig, buf) + _ = ms.orig.MarshalProto(buf) return buf, nil } // UnmarshalProto unmarshalls ExportResponse from proto bytes. func (ms ExportResponse) UnmarshalProto(data []byte) error { - return internal.UnmarshalProtoOrigExportProfilesServiceResponse(ms.orig, data) + return ms.orig.UnmarshalProto(data) } // MarshalJSON marshals ExportResponse into JSON bytes. 
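Note: the exported ExportRequest codec methods keep their signatures; only the implementation moves from internal free functions onto the orig type's own methods. A quick round-trip sketch (profiles is a placeholder pprofile.Profiles):

    req := pprofileotlp.NewExportRequestFromProfiles(profiles)
    buf, err := req.MarshalProto()
    if err != nil {
        return err
    }
    out := pprofileotlp.NewExportRequest()
    if err := out.UnmarshalProto(buf); err != nil {
        return err
    }
    profiles = out.Profiles()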
func (ms ExportResponse) MarshalJSON() ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportProfilesServiceResponse(ms.orig, dest) + ms.orig.MarshalJSON(dest) return slices.Clone(dest.Buffer()), dest.Error() } @@ -35,6 +34,6 @@ func (ms ExportResponse) MarshalJSON() ([]byte, error) { func (ms ExportResponse) UnmarshalJSON(data []byte) error { iter := json.BorrowIterator(data) defer json.ReturnIterator(iter) - internal.UnmarshalJSONOrigExportProfilesServiceResponse(ms.orig, iter) + ms.orig.UnmarshalJSON(iter) return iter.Error() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profile.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profile.go new file mode 100644 index 00000000000..dd92ee3092f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profile.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "fmt" + + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// switchDictionary updates the Profile, switching its indices from one +// dictionary to another. +func (ms Profile) switchDictionary(src, dst ProfilesDictionary) error { + for i, v := range ms.AttributeIndices().All() { + if src.AttributeTable().Len() <= int(v) { + return fmt.Errorf("invalid attribute index %d", v) + } + + attr := src.AttributeTable().At(int(v)) + err := attr.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch dictionary for attribute %d: %w", i, err) + } + idx, err := SetAttribute(dst.AttributeTable(), attr) + if err != nil { + return fmt.Errorf("couldn't set attribute %d: %w", i, err) + } + ms.AttributeIndices().SetAt(i, idx) + } + + for i, v := range ms.Samples().All() { + err := v.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("error switching dictionary for sample %d: %w", i, err) + } + } + + err := ms.PeriodType().switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("error switching dictionary for period type: %w", err) + } + err = ms.SampleType().switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("error switching dictionary for sample type: %w", err) + } + + return nil +} + +// Duration returns the duration associated with this Profile. +// +// Deprecated: Use Profile.DurationNano instead. +func (ms Profile) Duration() pcommon.Timestamp { + return pcommon.Timestamp(0) +} + +// SetDuration replaces the duration associated with this Profile. +// +// Deprecated: Use Profile.SetDurationNano instead. +func (ms Profile) SetDuration(_ pcommon.Timestamp) { +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go index 52ee2e812b6..35c411ea7fd 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go @@ -6,7 +6,7 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" import ( "encoding/hex" - "go.opentelemetry.io/collector/pdata/internal/data" + "go.opentelemetry.io/collector/pdata/internal" ) var emptyProfileID = ProfileID([16]byte{}) @@ -33,5 +33,5 @@ func (ms ProfileID) String() string { // IsEmpty returns true if id doesn't contain at least one non-zero byte. 
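Note the shape of the deprecation in profile.go above: Duration and SetDuration survive only as no-ops (Duration always returns 0), so existing callers keep compiling but silently lose data. Per the deprecation notices, new code should move to DurationNano/SetDurationNano; a hedged sketch (DurationNano is referenced by the notices but its signature is not shown in this hunk, and durationNanos is a placeholder):

    // old, now a no-op pair:
    //   p.SetDuration(ts); _ = p.Duration()
    // new:
    p.SetDurationNano(durationNanos)
    _ = p.DurationNano()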
func (ms ProfileID) IsEmpty() bool { - return data.ProfileID(ms).IsEmpty() + return internal.ProfileID(ms).IsEmpty() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go index bda6738d467..427dbcac323 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go @@ -3,6 +3,8 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" +import "fmt" + // MarkReadOnly marks the ResourceProfiles as shared so that no further modifications can be done on it. func (ms Profiles) MarkReadOnly() { ms.getState().MarkReadOnly() @@ -23,9 +25,36 @@ func (ms Profiles) SampleCount() int { for j := 0; j < sps.Len(); j++ { pcs := sps.At(j).Profiles() for k := 0; k < pcs.Len(); k++ { - sampleCount += pcs.At(k).Sample().Len() + sampleCount += pcs.At(k).Samples().Len() } } } return sampleCount } + +// switchDictionary updates the Profiles, switching its indices from one +// dictionary to another. +func (ms Profiles) switchDictionary(src, dst ProfilesDictionary) error { + for i, v := range ms.ResourceProfiles().All() { + err := v.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("error switching dictionary for resource profile %d: %w", i, err) + } + } + + return nil +} + +// ProfileCount calculates the total number of profile records. +func (ms Profiles) ProfileCount() int { + profileCount := 0 + rps := ms.ResourceProfiles() + for i := 0; i < rps.Len(); i++ { + rp := rps.At(i) + sps := rp.ScopeProfiles() + for j := 0; j < sps.Len(); j++ { + profileCount += sps.At(j).Profiles().Len() + } + } + return profileCount +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles_merge.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles_merge.go new file mode 100644 index 00000000000..851bf739d99 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles_merge.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +// MergeTo merges the current Profiles into dest, updating the destination +// dictionary as needed and appending the resource profiles. +// The source Profiles is consumed and marked read-only after this operation. +func (ms Profiles) MergeTo(dest Profiles) error { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + if ms.getOrig() == dest.getOrig() { + return nil + } + + if err := ms.switchDictionary(ms.Dictionary(), dest.Dictionary()); err != nil { + return err + } + + ms.ResourceProfiles().MoveAndAppendTo(dest.ResourceProfiles()) + ms.MarkReadOnly() + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/resourceprofiles.go new file mode 100644 index 00000000000..3866b5f719c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/resourceprofiles.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import "fmt" + +// switchDictionary updates the ResourceProfiles, switching its indices from one +// dictionary to another. 
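Note: MergeTo in profiles_merge.go above is the consumer-facing entry point for the switchDictionary machinery added throughout this patch: it re-indexes src's dictionary references into dest's dictionary, moves the resource profiles across, and then freezes src. A hedged usage sketch (src and dest are placeholder pprofile.Profiles values, both mutable):

    if err := src.MergeTo(dest); err != nil {
        return err // e.g. an out-of-range index or a full dictionary table
    }
    // src is now read-only and its resource profiles live in dest;
    // dest.Dictionary() holds the re-interned strings, links, stacks, etc.
    n := dest.ProfileCount() // new helper, also added in this patch
    _ = n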
+func (ms ResourceProfiles) switchDictionary(src, dst ProfilesDictionary) error { + for i, v := range ms.ScopeProfiles().All() { + err := v.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("error switching dictionary for scope profile %d: %w", i, err) + } + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/sample.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/sample.go new file mode 100644 index 00000000000..23db19e9c32 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/sample.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import "fmt" + +// switchDictionary updates the Sample, switching its indices from one +// dictionary to another. +func (ms Sample) switchDictionary(src, dst ProfilesDictionary) error { + for i, v := range ms.AttributeIndices().All() { + if src.AttributeTable().Len() <= int(v) { + return fmt.Errorf("invalid attribute index %d", v) + } + + attr := src.AttributeTable().At(int(v)) + err := attr.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch dictionary for attribute %d: %w", i, err) + } + idx, err := SetAttribute(dst.AttributeTable(), attr) + if err != nil { + return fmt.Errorf("couldn't set attribute %d: %w", i, err) + } + ms.AttributeIndices().SetAt(i, idx) + } + + if ms.LinkIndex() > 0 { + if src.LinkTable().Len() <= int(ms.LinkIndex()) { + return fmt.Errorf("invalid link index %d", ms.LinkIndex()) + } + + idx, err := SetLink(dst.LinkTable(), src.LinkTable().At(int(ms.LinkIndex()))) + if err != nil { + return fmt.Errorf("couldn't set link: %w", err) + } + ms.SetLinkIndex(idx) + } + + if ms.StackIndex() > 0 { + if src.StackTable().Len() <= int(ms.StackIndex()) { + return fmt.Errorf("invalid stack index %d", ms.StackIndex()) + } + + stack := src.StackTable().At(int(ms.StackIndex())) + err := stack.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch stack dictionary: %w", err) + } + + idx, err := SetStack(dst.StackTable(), stack) + if err != nil { + return fmt.Errorf("couldn't set stack: %w", err) + } + ms.SetStackIndex(idx) + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/scopeprofiles.go new file mode 100644 index 00000000000..89d098c23f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/scopeprofiles.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import "fmt" + +// switchDictionary updates the ScopeProfiles, switching its indices from one +// dictionary to another. 
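Note: all of the switchDictionary helpers in this patch (KeyValueAndUnit, Line, Mapping, Location, ValueType, Sample, Stack, and the container types here) follow one pattern: validate the index against the source dictionary, re-intern the referenced value into the destination via the matching Set* helper, and rewrite the index. A standalone sketch of that step for a string field, with srcDict, dstDict, and kv as hypothetical placeholders:

    s := srcDict.StringTable().At(int(kv.KeyStrindex()))
    idx, err := pprofile.SetString(dstDict.StringTable(), s)
    if err != nil {
        return err // the destination string table is already at math.MaxInt32 entries
    }
    kv.SetKeyStrindex(idx)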
+func (ms ScopeProfiles) switchDictionary(src, dst ProfilesDictionary) error { + for i, v := range ms.Profiles().All() { + err := v.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("error switching dictionary for profile %d: %w", i, err) + } + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/stack.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/stack.go index 3629b0ee995..8d4f5be1d97 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/stack.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/stack.go @@ -3,6 +3,10 @@ package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" +import ( + "fmt" +) + // Equal checks equality with another Stack func (ms Stack) Equal(val Stack) bool { if ms.LocationIndices().Len() != val.LocationIndices().Len() { @@ -17,3 +21,26 @@ func (ms Stack) Equal(val Stack) bool { return true } + +// switchDictionary updates the Stack, switching its indices from one +// dictionary to another. +func (ms Stack) switchDictionary(src, dst ProfilesDictionary) error { + for i, v := range ms.LocationIndices().All() { + if src.LocationTable().Len() <= int(v) { + return fmt.Errorf("invalid location index %d", v) + } + + loc := src.LocationTable().At(int(v)) + err := loc.switchDictionary(src, dst) + if err != nil { + return fmt.Errorf("couldn't switch dictionary for location: %w", err) + } + idx, err := SetLocation(dst.LocationTable(), loc) + if err != nil { + return fmt.Errorf("couldn't set location %d: %w", i, err) + } + ms.LocationIndices().SetAt(i, idx) + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/stacks.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/stacks.go new file mode 100644 index 00000000000..895f2fc4e6d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/stacks.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "errors" + "math" +) + +var errTooManyStackTableEntries = errors.New("too many entries in StackTable") + +// SetStack updates a StackSlice, adding or providing a stack and returns its +// index. +func SetStack(table StackSlice, st Stack) (int32, error) { + for j, l := range table.All() { + if l.Equal(st) { + if j > math.MaxInt32 { + return 0, errTooManyStackTableEntries + } + return int32(j), nil + } + } + + if table.Len() >= math.MaxInt32 { + return 0, errTooManyStackTableEntries + } + + st.CopyTo(table.AppendEmpty()) + return int32(table.Len() - 1), nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/string_table.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/string_table.go index 3c1ffb304c8..e1f4d6023f3 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pprofile/string_table.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/string_table.go @@ -20,7 +20,7 @@ func SetString(table pcommon.StringSlice, val string) (int32, error) { return 0, errTooManyStringTableEntries } // Return the index of the existing value. 
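Note: SetStack in stacks.go above rounds out the Set* helper family (SetString, SetLink, SetMapping, SetLocation) with the same intern-and-return-index contract. A sketch wiring it to a Sample (dict, stack, and sample are placeholders):

    idx, err := pprofile.SetStack(dict.StackTable(), stack)
    if err != nil {
        return err
    }
    sample.SetStackIndex(idx)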
- return int32(j), nil //nolint:gosec // G115 overflow checked + return int32(j), nil } } @@ -29,5 +29,5 @@ func SetString(table pcommon.StringSlice, val string) (int32, error) { } table.Append(val) - return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked + return int32(table.Len() - 1), nil } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/valuetype.go new file mode 100644 index 00000000000..1d6339f68cf --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/valuetype.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import "fmt" + +// switchDictionary updates the ValueType, switching its indices from one +// dictionary to another. +func (ms ValueType) switchDictionary(src, dst ProfilesDictionary) error { + if ms.TypeStrindex() > 0 { + if src.StringTable().Len() <= int(ms.TypeStrindex()) { + return fmt.Errorf("invalid type index %d", ms.TypeStrindex()) + } + + idx, err := SetString(dst.StringTable(), src.StringTable().At(int(ms.TypeStrindex()))) + if err != nil { + return fmt.Errorf("couldn't set type: %w", err) + } + ms.SetTypeStrindex(idx) + } + + if ms.UnitStrindex() > 0 { + if src.StringTable().Len() <= int(ms.UnitStrindex()) { + return fmt.Errorf("invalid unit index %d", ms.UnitStrindex()) + } + + idx, err := SetString(dst.StringTable(), src.StringTable().At(int(ms.UnitStrindex()))) + if err != nil { + return fmt.Errorf("couldn't set unit: %w", err) + } + ms.SetUnitStrindex(idx) + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go index 7db68c6ef75..c68406cadeb 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go @@ -8,7 +8,6 @@ package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewResourceSpans function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceSpans struct { - orig *otlptrace.ResourceSpans + orig *internal.ResourceSpans state *internal.State } -func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) ResourceSpans { +func newResourceSpans(orig *internal.ResourceSpans, state *internal.State) ResourceSpans { return ResourceSpans{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) Reso // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. 
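Aside (not part of the diff): ValueType.switchDictionary above re-resolves its type and unit strings through SetString (the function the removed //nolint directives sit in). Its deduplication behaves like this in isolation:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	tbl := pcommon.NewStringSlice()

	cpu, _ := pprofile.SetString(tbl, "cpu")   // not present: appended at index 0
	again, _ := pprofile.SetString(tbl, "cpu") // present: existing index returned
	unit, _ := pprofile.SetString(tbl, "nanoseconds")
	fmt.Println(cpu, again, unit, tbl.Len()) // 0 0 1 2
}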
func NewResourceSpans() ResourceSpans { - return newResourceSpans(internal.NewOrigResourceSpans(), internal.NewState()) + return newResourceSpans(internal.NewResourceSpans(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms ResourceSpans) MoveTo(dest ResourceSpans) { if ms.orig == dest.orig { return } - internal.DeleteOrigResourceSpans(dest.orig, false) + internal.DeleteResourceSpans(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Resource returns the resource associated with this ResourceSpans. func (ms ResourceSpans) Resource() pcommon.Resource { - return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) + return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state)) } // ScopeSpans returns the ScopeSpans associated with this ResourceSpans. @@ -73,5 +72,5 @@ func (ms ResourceSpans) SetSchemaUrl(v string) { // CopyTo copies all properties from the current struct overriding the destination. func (ms ResourceSpans) CopyTo(dest ResourceSpans) { dest.state.AssertMutable() - internal.CopyOrigResourceSpans(dest.orig, ms.orig) + internal.CopyResourceSpans(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go index 565f413dd90..ed2a7a87230 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) // ResourceSpansSlice logically represents a slice of ResourceSpans. @@ -22,18 +21,18 @@ import ( // Must use NewResourceSpansSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceSpansSlice struct { - orig *[]*otlptrace.ResourceSpans + orig *[]*internal.ResourceSpans state *internal.State } -func newResourceSpansSlice(orig *[]*otlptrace.ResourceSpans, state *internal.State) ResourceSpansSlice { +func newResourceSpansSlice(orig *[]*internal.ResourceSpans, state *internal.State) ResourceSpansSlice { return ResourceSpansSlice{orig: orig, state: state} } -// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements. +// NewResourceSpansSlice creates a ResourceSpansSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewResourceSpansSlice() ResourceSpansSlice { - orig := []*otlptrace.ResourceSpans(nil) + orig := []*internal.ResourceSpans(nil) return newResourceSpansSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ResourceSpansSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlptrace.ResourceSpans, len(*es.orig), newCap) + newOrig := make([]*internal.ResourceSpans, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ResourceSpansSlice) EnsureCapacity(newCap int) { // It returns the newly added ResourceSpans. 
func (es ResourceSpansSlice) AppendEmpty() ResourceSpans { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigResourceSpans()) + *es.orig = append(*es.orig, internal.NewResourceSpans()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigResourceSpans((*es.orig)[i], true) + internal.DeleteResourceSpans((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigResourceSpansSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyResourceSpansPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ResourceSpans elements within ResourceSpansSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go index 5ea42e985a6..a06c1df34d9 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go @@ -8,7 +8,6 @@ package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -20,11 +19,11 @@ import ( // Must use NewScopeSpans function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeSpans struct { - orig *otlptrace.ScopeSpans + orig *internal.ScopeSpans state *internal.State } -func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans { +func newScopeSpans(orig *internal.ScopeSpans, state *internal.State) ScopeSpans { return ScopeSpans{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewScopeSpans() ScopeSpans { - return newScopeSpans(internal.NewOrigScopeSpans(), internal.NewState()) + return newScopeSpans(internal.NewScopeSpans(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,13 +44,13 @@ func (ms ScopeSpans) MoveTo(dest ScopeSpans) { if ms.orig == dest.orig { return } - internal.DeleteOrigScopeSpans(dest.orig, false) + internal.DeleteScopeSpans(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } // Scope returns the scope associated with this ScopeSpans. func (ms ScopeSpans) Scope() pcommon.InstrumentationScope { - return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) + return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state)) } // Spans returns the Spans associated with this ScopeSpans. @@ -73,5 +72,5 @@ func (ms ScopeSpans) SetSchemaUrl(v string) { // CopyTo copies all properties from the current struct overriding the destination. 
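Aside (not part of the diff): the generated slice wrappers in this diff (ResourceSpansSlice above, the ScopeSpans/Span/SpanEvent/SpanLink slices below) change only their backing element type, internal.* instead of otlptrace.*; the exported API is untouched, so typical usage still looks like this sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	td := ptrace.NewTraces()
	rss := td.ResourceSpans()

	rss.EnsureCapacity(2) // one up-front allocation of the backing slice
	rs := rss.AppendEmpty()
	rs.SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0")

	// Removed elements are handed to internal.DeleteResourceSpans for reuse.
	rss.RemoveIf(func(ptrace.ResourceSpans) bool { return false })
	fmt.Println(rss.Len()) // 1
}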
func (ms ScopeSpans) CopyTo(dest ScopeSpans) { dest.state.AssertMutable() - internal.CopyOrigScopeSpans(dest.orig, ms.orig) + internal.CopyScopeSpans(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go index 8e2fbf65724..2522d5b2b6c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) // ScopeSpansSlice logically represents a slice of ScopeSpans. @@ -22,18 +21,18 @@ import ( // Must use NewScopeSpansSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeSpansSlice struct { - orig *[]*otlptrace.ScopeSpans + orig *[]*internal.ScopeSpans state *internal.State } -func newScopeSpansSlice(orig *[]*otlptrace.ScopeSpans, state *internal.State) ScopeSpansSlice { +func newScopeSpansSlice(orig *[]*internal.ScopeSpans, state *internal.State) ScopeSpansSlice { return ScopeSpansSlice{orig: orig, state: state} } -// NewScopeSpansSlice creates a ScopeSpansSlice with 0 elements. +// NewScopeSpansSlice creates a ScopeSpansSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewScopeSpansSlice() ScopeSpansSlice { - orig := []*otlptrace.ScopeSpans(nil) + orig := []*internal.ScopeSpans(nil) return newScopeSpansSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es ScopeSpansSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlptrace.ScopeSpans, len(*es.orig), newCap) + newOrig := make([]*internal.ScopeSpans, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es ScopeSpansSlice) EnsureCapacity(newCap int) { // It returns the newly added ScopeSpans. 
func (es ScopeSpansSlice) AppendEmpty() ScopeSpans { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigScopeSpans()) + *es.orig = append(*es.orig, internal.NewScopeSpans()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigScopeSpans((*es.orig)[i], true) + internal.DeleteScopeSpans((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es ScopeSpansSlice) CopyTo(dest ScopeSpansSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigScopeSpansSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopyScopeSpansPtrSlice(*dest.orig, *es.orig) } // Sort sorts the ScopeSpans elements within ScopeSpansSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go index 433487d4197..238386c1798 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go @@ -8,8 +8,6 @@ package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" - "go.opentelemetry.io/collector/pdata/internal/data" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -22,11 +20,11 @@ import ( // Must use NewSpan function to create new instances. // Important: zero-initialized instance is not valid for use. type Span struct { - orig *otlptrace.Span + orig *internal.Span state *internal.State } -func newSpan(orig *otlptrace.Span, state *internal.State) Span { +func newSpan(orig *internal.Span, state *internal.State) Span { return Span{orig: orig, state: state} } @@ -35,7 +33,7 @@ func newSpan(orig *otlptrace.Span, state *internal.State) Span { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSpan() Span { - return newSpan(internal.NewOrigSpan(), internal.NewState()) + return newSpan(internal.NewSpan(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -47,7 +45,7 @@ func (ms Span) MoveTo(dest Span) { if ms.orig == dest.orig { return } - internal.DeleteOrigSpan(dest.orig, false) + internal.DeleteSpan(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -59,7 +57,7 @@ func (ms Span) TraceID() pcommon.TraceID { // SetTraceID replaces the traceid associated with this Span. func (ms Span) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() - ms.orig.TraceId = data.TraceID(v) + ms.orig.TraceId = internal.TraceID(v) } // SpanID returns the spanid associated with this Span. @@ -70,12 +68,12 @@ func (ms Span) SpanID() pcommon.SpanID { // SetSpanID replaces the spanid associated with this Span. func (ms Span) SetSpanID(v pcommon.SpanID) { ms.state.AssertMutable() - ms.orig.SpanId = data.SpanID(v) + ms.orig.SpanId = internal.SpanID(v) } // TraceState returns the tracestate associated with this Span. func (ms Span) TraceState() pcommon.TraceState { - return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state)) + return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state)) } // ParentSpanID returns the parentspanid associated with this Span. 
@@ -86,7 +84,7 @@ func (ms Span) ParentSpanID() pcommon.SpanID { // SetParentSpanID replaces the parentspanid associated with this Span. func (ms Span) SetParentSpanID(v pcommon.SpanID) { ms.state.AssertMutable() - ms.orig.ParentSpanId = data.SpanID(v) + ms.orig.ParentSpanId = internal.SpanID(v) } // Flags returns the flags associated with this Span. @@ -119,7 +117,7 @@ func (ms Span) Kind() SpanKind { // SetKind replaces the kind associated with this Span. func (ms Span) SetKind(v SpanKind) { ms.state.AssertMutable() - ms.orig.Kind = otlptrace.Span_SpanKind(v) + ms.orig.Kind = internal.SpanKind(v) } // StartTimestamp returns the starttimestamp associated with this Span. @@ -146,7 +144,7 @@ func (ms Span) SetEndTimestamp(v pcommon.Timestamp) { // Attributes returns the Attributes associated with this Span. func (ms Span) Attributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state)) } // DroppedAttributesCount returns the droppedattributescount associated with this Span. @@ -200,5 +198,5 @@ func (ms Span) Status() Status { // CopyTo copies all properties from the current struct overriding the destination. func (ms Span) CopyTo(dest Span) { dest.state.AssertMutable() - internal.CopyOrigSpan(dest.orig, ms.orig) + internal.CopySpan(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go index f474aca804f..333f5d47b4f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go @@ -8,7 +8,6 @@ package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -21,11 +20,11 @@ import ( // Must use NewSpanEvent function to create new instances. // Important: zero-initialized instance is not valid for use. type SpanEvent struct { - orig *otlptrace.Span_Event + orig *internal.SpanEvent state *internal.State } -func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent { +func newSpanEvent(orig *internal.SpanEvent, state *internal.State) SpanEvent { return SpanEvent{orig: orig, state: state} } @@ -34,7 +33,7 @@ func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSpanEvent() SpanEvent { - return newSpanEvent(internal.NewOrigSpan_Event(), internal.NewState()) + return newSpanEvent(internal.NewSpanEvent(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -46,7 +45,7 @@ func (ms SpanEvent) MoveTo(dest SpanEvent) { if ms.orig == dest.orig { return } - internal.DeleteOrigSpan_Event(dest.orig, false) + internal.DeleteSpanEvent(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -74,7 +73,7 @@ func (ms SpanEvent) SetName(v string) { // Attributes returns the Attributes associated with this SpanEvent. 
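Aside (not part of the diff): Span's setters now convert to internal.TraceID, internal.SpanID and internal.SpanKind instead of the data/otlptrace types, but callers still pass the pcommon and ptrace types, so existing code compiles unchanged:

package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()
	span.SetTraceID(pcommon.TraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}))
	span.SetSpanID(pcommon.SpanID([8]byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}))
	span.SetKind(ptrace.SpanKindServer)
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(5 * time.Millisecond)))
	span.Attributes().PutStr("http.request.method", "GET")
}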
func (ms SpanEvent) Attributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state)) } // DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent. @@ -91,5 +90,5 @@ func (ms SpanEvent) SetDroppedAttributesCount(v uint32) { // CopyTo copies all properties from the current struct overriding the destination. func (ms SpanEvent) CopyTo(dest SpanEvent) { dest.state.AssertMutable() - internal.CopyOrigSpan_Event(dest.orig, ms.orig) + internal.CopySpanEvent(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go index c0da19167d2..f21a4d671a8 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) // SpanEventSlice logically represents a slice of SpanEvent. @@ -22,18 +21,18 @@ import ( // Must use NewSpanEventSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type SpanEventSlice struct { - orig *[]*otlptrace.Span_Event + orig *[]*internal.SpanEvent state *internal.State } -func newSpanEventSlice(orig *[]*otlptrace.Span_Event, state *internal.State) SpanEventSlice { +func newSpanEventSlice(orig *[]*internal.SpanEvent, state *internal.State) SpanEventSlice { return SpanEventSlice{orig: orig, state: state} } -// NewSpanEventSlice creates a SpanEventSlice with 0 elements. +// NewSpanEventSlice creates a SpanEventSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewSpanEventSlice() SpanEventSlice { - orig := []*otlptrace.Span_Event(nil) + orig := []*internal.SpanEvent(nil) return newSpanEventSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es SpanEventSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlptrace.Span_Event, len(*es.orig), newCap) + newOrig := make([]*internal.SpanEvent, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es SpanEventSlice) EnsureCapacity(newCap int) { // It returns the newly added SpanEvent. 
func (es SpanEventSlice) AppendEmpty() SpanEvent { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigSpan_Event()) + *es.orig = append(*es.orig, internal.NewSpanEvent()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigSpan_Event((*es.orig)[i], true) + internal.DeleteSpanEvent((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es SpanEventSlice) CopyTo(dest SpanEventSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigSpan_EventSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopySpanEventPtrSlice(*dest.orig, *es.orig) } // Sort sorts the SpanEvent elements within SpanEventSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go index cfe1caaaf39..ab585857a56 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go @@ -8,8 +8,6 @@ package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" - "go.opentelemetry.io/collector/pdata/internal/data" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) @@ -23,11 +21,11 @@ import ( // Must use NewSpanLink function to create new instances. // Important: zero-initialized instance is not valid for use. type SpanLink struct { - orig *otlptrace.Span_Link + orig *internal.SpanLink state *internal.State } -func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink { +func newSpanLink(orig *internal.SpanLink, state *internal.State) SpanLink { return SpanLink{orig: orig, state: state} } @@ -36,7 +34,7 @@ func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSpanLink() SpanLink { - return newSpanLink(internal.NewOrigSpan_Link(), internal.NewState()) + return newSpanLink(internal.NewSpanLink(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -48,7 +46,7 @@ func (ms SpanLink) MoveTo(dest SpanLink) { if ms.orig == dest.orig { return } - internal.DeleteOrigSpan_Link(dest.orig, false) + internal.DeleteSpanLink(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -60,7 +58,7 @@ func (ms SpanLink) TraceID() pcommon.TraceID { // SetTraceID replaces the traceid associated with this SpanLink. func (ms SpanLink) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() - ms.orig.TraceId = data.TraceID(v) + ms.orig.TraceId = internal.TraceID(v) } // SpanID returns the spanid associated with this SpanLink. @@ -71,17 +69,17 @@ func (ms SpanLink) SpanID() pcommon.SpanID { // SetSpanID replaces the spanid associated with this SpanLink. func (ms SpanLink) SetSpanID(v pcommon.SpanID) { ms.state.AssertMutable() - ms.orig.SpanId = data.SpanID(v) + ms.orig.SpanId = internal.SpanID(v) } // TraceState returns the tracestate associated with this SpanLink. 
func (ms SpanLink) TraceState() pcommon.TraceState { - return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state)) + return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state)) } // Attributes returns the Attributes associated with this SpanLink. func (ms SpanLink) Attributes() pcommon.Map { - return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) + return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state)) } // DroppedAttributesCount returns the droppedattributescount associated with this SpanLink. @@ -109,5 +107,5 @@ func (ms SpanLink) SetFlags(v uint32) { // CopyTo copies all properties from the current struct overriding the destination. func (ms SpanLink) CopyTo(dest SpanLink) { dest.state.AssertMutable() - internal.CopyOrigSpan_Link(dest.orig, ms.orig) + internal.CopySpanLink(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go index a4626b3f16f..65b8b473591 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) // SpanLinkSlice logically represents a slice of SpanLink. @@ -22,18 +21,18 @@ import ( // Must use NewSpanLinkSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type SpanLinkSlice struct { - orig *[]*otlptrace.Span_Link + orig *[]*internal.SpanLink state *internal.State } -func newSpanLinkSlice(orig *[]*otlptrace.Span_Link, state *internal.State) SpanLinkSlice { +func newSpanLinkSlice(orig *[]*internal.SpanLink, state *internal.State) SpanLinkSlice { return SpanLinkSlice{orig: orig, state: state} } -// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements. +// NewSpanLinkSlice creates a SpanLinkSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewSpanLinkSlice() SpanLinkSlice { - orig := []*otlptrace.Span_Link(nil) + orig := []*internal.SpanLink(nil) return newSpanLinkSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es SpanLinkSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlptrace.Span_Link, len(*es.orig), newCap) + newOrig := make([]*internal.SpanLink, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es SpanLinkSlice) EnsureCapacity(newCap int) { // It returns the newly added SpanLink. 
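Aside (not part of the diff): SpanLink follows the same pattern as Span (IDs via internal, trace state via the new TraceStateWrapper), so attaching a link to a span is unchanged for callers:

package main

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()

	link := span.Links().AppendEmpty()
	link.SetTraceID(pcommon.TraceID([16]byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99}))
	link.SetSpanID(pcommon.SpanID([8]byte{0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x11, 0x22, 0x33}))
	link.TraceState().FromRaw("congo=t61rcWkgMzE")
	link.Attributes().PutBool("linked.sampled", true)
}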
func (es SpanLinkSlice) AppendEmpty() SpanLink { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigSpan_Link()) + *es.orig = append(*es.orig, internal.NewSpanLink()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigSpan_Link((*es.orig)[i], true) + internal.DeleteSpanLink((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigSpan_LinkSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopySpanLinkPtrSlice(*dest.orig, *es.orig) } // Sort sorts the SpanLink elements within SpanLinkSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go index 7417966fddb..9cbc685062d 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go @@ -11,7 +11,6 @@ import ( "sort" "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) // SpanSlice logically represents a slice of Span. @@ -22,18 +21,18 @@ import ( // Must use NewSpanSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type SpanSlice struct { - orig *[]*otlptrace.Span + orig *[]*internal.Span state *internal.State } -func newSpanSlice(orig *[]*otlptrace.Span, state *internal.State) SpanSlice { +func newSpanSlice(orig *[]*internal.Span, state *internal.State) SpanSlice { return SpanSlice{orig: orig, state: state} } -// NewSpanSlice creates a SpanSlice with 0 elements. +// NewSpanSlice creates a SpanSliceWrapper with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewSpanSlice() SpanSlice { - orig := []*otlptrace.Span(nil) + orig := []*internal.Span(nil) return newSpanSlice(&orig, internal.NewState()) } @@ -90,7 +89,7 @@ func (es SpanSlice) EnsureCapacity(newCap int) { return } - newOrig := make([]*otlptrace.Span, len(*es.orig), newCap) + newOrig := make([]*internal.Span, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } @@ -99,7 +98,7 @@ func (es SpanSlice) EnsureCapacity(newCap int) { // It returns the newly added Span. 
func (es SpanSlice) AppendEmpty() Span { es.state.AssertMutable() - *es.orig = append(*es.orig, internal.NewOrigSpan()) + *es.orig = append(*es.orig, internal.NewSpan()) return es.At(es.Len() - 1) } @@ -128,7 +127,7 @@ func (es SpanSlice) RemoveIf(f func(Span) bool) { newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { - internal.DeleteOrigSpan((*es.orig)[i], true) + internal.DeleteSpan((*es.orig)[i], true) (*es.orig)[i] = nil continue @@ -152,7 +151,7 @@ func (es SpanSlice) CopyTo(dest SpanSlice) { if es.orig == dest.orig { return } - *dest.orig = internal.CopyOrigSpanSlice(*dest.orig, *es.orig) + *dest.orig = internal.CopySpanPtrSlice(*dest.orig, *es.orig) } // Sort sorts the Span elements within SpanSlice given the diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go index e46b45f31b4..da1a8272601 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go @@ -8,7 +8,6 @@ package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) // Status is an optional final status for this span. Semantically, when Status was not @@ -20,11 +19,11 @@ import ( // Must use NewStatus function to create new instances. // Important: zero-initialized instance is not valid for use. type Status struct { - orig *otlptrace.Status + orig *internal.Status state *internal.State } -func newStatus(orig *otlptrace.Status, state *internal.State) Status { +func newStatus(orig *internal.Status, state *internal.State) Status { return Status{orig: orig, state: state} } @@ -33,7 +32,7 @@ func newStatus(orig *otlptrace.Status, state *internal.State) Status { // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewStatus() Status { - return newStatus(internal.NewOrigStatus(), internal.NewState()) + return newStatus(internal.NewStatus(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -45,7 +44,7 @@ func (ms Status) MoveTo(dest Status) { if ms.orig == dest.orig { return } - internal.DeleteOrigStatus(dest.orig, false) + internal.DeleteStatus(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -68,11 +67,11 @@ func (ms Status) Code() StatusCode { // SetCode replaces the code associated with this Status. func (ms Status) SetCode(v StatusCode) { ms.state.AssertMutable() - ms.orig.Code = otlptrace.Status_StatusCode(v) + ms.orig.Code = internal.StatusCode(v) } // CopyTo copies all properties from the current struct overriding the destination. 
func (ms Status) CopyTo(dest Status) { dest.state.AssertMutable() - internal.CopyOrigStatus(dest.orig, ms.orig) + internal.CopyStatus(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go index 4334e8c79c0..5e69bc03f27 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go @@ -8,7 +8,6 @@ package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" ) // Traces is the top-level struct that is propagated through the traces pipeline. @@ -19,10 +18,10 @@ import ( // // Must use NewTraces function to create new instances. // Important: zero-initialized instance is not valid for use. -type Traces internal.Traces +type Traces internal.TracesWrapper -func newTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *internal.State) Traces { - return Traces(internal.NewTraces(orig, state)) +func newTraces(orig *internal.ExportTraceServiceRequest, state *internal.State) Traces { + return Traces(internal.NewTracesWrapper(orig, state)) } // NewTraces creates a new empty Traces. @@ -30,7 +29,7 @@ func newTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *intern // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewTraces() Traces { - return newTraces(internal.NewOrigExportTraceServiceRequest(), internal.NewState()) + return newTraces(internal.NewExportTraceServiceRequest(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -42,7 +41,7 @@ func (ms Traces) MoveTo(dest Traces) { if ms.getOrig() == dest.getOrig() { return } - internal.DeleteOrigExportTraceServiceRequest(dest.getOrig(), false) + internal.DeleteExportTraceServiceRequest(dest.getOrig(), false) *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig() } @@ -54,13 +53,13 @@ func (ms Traces) ResourceSpans() ResourceSpansSlice { // CopyTo copies all properties from the current struct overriding the destination. 
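Aside (not part of the diff): every MoveTo in this diff has the same shape — delete the destination's old payload, then swap the underlying structs — so a move is O(1) and the source is left empty:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	src := ptrace.NewTraces()
	src.ResourceSpans().AppendEmpty()

	dst := ptrace.NewTraces()
	src.MoveTo(dst) // swap, no per-span copying

	fmt.Println(src.ResourceSpans().Len(), dst.ResourceSpans().Len()) // 0 1
}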
func (ms Traces) CopyTo(dest Traces) { dest.getState().AssertMutable() - internal.CopyOrigExportTraceServiceRequest(dest.getOrig(), ms.getOrig()) + internal.CopyExportTraceServiceRequest(dest.getOrig(), ms.getOrig()) } -func (ms Traces) getOrig() *otlpcollectortrace.ExportTraceServiceRequest { - return internal.GetOrigTraces(internal.Traces(ms)) +func (ms Traces) getOrig() *internal.ExportTraceServiceRequest { + return internal.GetTracesOrig(internal.TracesWrapper(ms)) } func (ms Traces) getState() *internal.State { - return internal.GetTracesState(internal.Traces(ms)) + return internal.GetTracesState(internal.TracesWrapper(ms)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go index 1c1cab317dd..a6f0dd36f41 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go @@ -6,7 +6,6 @@ package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" import ( "slices" - "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" ) @@ -18,7 +17,7 @@ type JSONMarshaler struct{} func (*JSONMarshaler) MarshalTraces(td Traces) ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportTraceServiceRequest(td.getOrig(), dest) + td.getOrig().MarshalJSON(dest) if dest.Error() != nil { return nil, dest.Error() } @@ -33,7 +32,7 @@ func (*JSONUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) { iter := json.BorrowIterator(buf) defer json.ReturnIterator(iter) td := NewTraces() - internal.UnmarshalJSONOrigExportTraceServiceRequest(td.getOrig(), iter) + td.getOrig().UnmarshalJSON(iter) if iter.Error() != nil { return Traces{}, iter.Error() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go index 1502f818a96..c9a02b2899a 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go @@ -3,42 +3,38 @@ package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" -import ( - "go.opentelemetry.io/collector/pdata/internal" -) - var _ MarshalSizer = (*ProtoMarshaler)(nil) type ProtoMarshaler struct{} func (e *ProtoMarshaler) MarshalTraces(td Traces) ([]byte, error) { - size := internal.SizeProtoOrigExportTraceServiceRequest(td.getOrig()) + size := td.getOrig().SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportTraceServiceRequest(td.getOrig(), buf) + _ = td.getOrig().MarshalProto(buf) return buf, nil } func (e *ProtoMarshaler) TracesSize(td Traces) int { - return internal.SizeProtoOrigExportTraceServiceRequest(td.getOrig()) + return td.getOrig().SizeProto() } func (e *ProtoMarshaler) ResourceSpansSize(td ResourceSpans) int { - return internal.SizeProtoOrigResourceSpans(td.orig) + return td.orig.SizeProto() } func (e *ProtoMarshaler) ScopeSpansSize(td ScopeSpans) int { - return internal.SizeProtoOrigScopeSpans(td.orig) + return td.orig.SizeProto() } func (e *ProtoMarshaler) SpanSize(td Span) int { - return internal.SizeProtoOrigSpan(td.orig) + return td.orig.SizeProto() } type ProtoUnmarshaler struct{} func (d *ProtoUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) { td := NewTraces() - err := internal.UnmarshalProtoOrigExportTraceServiceRequest(td.getOrig(), buf) + err := td.getOrig().UnmarshalProto(buf) if err != nil { return 
Traces{}, err } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/generated_exportpartialsuccess.go index d5f58207052..092d23c8d7d 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/generated_exportpartialsuccess.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/generated_exportpartialsuccess.go @@ -8,7 +8,6 @@ package ptraceotlp import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" ) // ExportPartialSuccess represents the details of a partially successful export request. @@ -19,11 +18,11 @@ import ( // Must use NewExportPartialSuccess function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportPartialSuccess struct { - orig *otlpcollectortrace.ExportTracePartialSuccess + orig *internal.ExportTracePartialSuccess state *internal.State } -func newExportPartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, state *internal.State) ExportPartialSuccess { +func newExportPartialSuccess(orig *internal.ExportTracePartialSuccess, state *internal.State) ExportPartialSuccess { return ExportPartialSuccess{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newExportPartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportPartialSuccess() ExportPartialSuccess { - return newExportPartialSuccess(internal.NewOrigExportTracePartialSuccess(), internal.NewState()) + return newExportPartialSuccess(internal.NewExportTracePartialSuccess(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { if ms.orig == dest.orig { return } - internal.DeleteOrigExportTracePartialSuccess(dest.orig, false) + internal.DeleteExportTracePartialSuccess(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -73,5 +72,5 @@ func (ms ExportPartialSuccess) SetErrorMessage(v string) { // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { dest.state.AssertMutable() - internal.CopyOrigExportTracePartialSuccess(dest.orig, ms.orig) + internal.CopyExportTracePartialSuccess(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/generated_exportresponse.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/generated_exportresponse.go index be9d94eeafe..98e123c8e72 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/generated_exportresponse.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/generated_exportresponse.go @@ -8,7 +8,6 @@ package ptraceotlp import ( "go.opentelemetry.io/collector/pdata/internal" - otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" ) // ExportResponse represents the response for gRPC/HTTP client/server. @@ -19,11 +18,11 @@ import ( // Must use NewExportResponse function to create new instances. // Important: zero-initialized instance is not valid for use. 
type ExportResponse struct { - orig *otlpcollectortrace.ExportTraceServiceResponse + orig *internal.ExportTraceServiceResponse state *internal.State } -func newExportResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, state *internal.State) ExportResponse { +func newExportResponse(orig *internal.ExportTraceServiceResponse, state *internal.State) ExportResponse { return ExportResponse{orig: orig, state: state} } @@ -32,7 +31,7 @@ func newExportResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, stat // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportResponse() ExportResponse { - return newExportResponse(internal.NewOrigExportTraceServiceResponse(), internal.NewState()) + return newExportResponse(internal.NewExportTraceServiceResponse(), internal.NewState()) } // MoveTo moves all properties from the current struct overriding the destination and @@ -44,7 +43,7 @@ func (ms ExportResponse) MoveTo(dest ExportResponse) { if ms.orig == dest.orig { return } - internal.DeleteOrigExportTraceServiceResponse(dest.orig, false) + internal.DeleteExportTraceServiceResponse(dest.orig, false) *dest.orig, *ms.orig = *ms.orig, *dest.orig } @@ -56,5 +55,5 @@ func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportResponse) CopyTo(dest ExportResponse) { dest.state.AssertMutable() - internal.CopyOrigExportTraceServiceResponse(dest.orig, ms.orig) + internal.CopyExportTraceServiceResponse(dest.orig, ms.orig) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/grpc.go index 5479976abf6..1314a2e156f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/grpc.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/grpc.go @@ -11,8 +11,7 @@ import ( "google.golang.org/grpc/status" "go.opentelemetry.io/collector/pdata/internal" - otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" - _ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // enforces custom gRPC encoding to be loaded. + "go.opentelemetry.io/collector/pdata/internal/otelgrpc" "go.opentelemetry.io/collector/pdata/internal/otlp" ) @@ -32,11 +31,11 @@ type GRPCClient interface { // NewGRPCClient returns a new GRPCClient connected using the given connection. func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { - return &grpcClient{rawClient: otlpcollectortrace.NewTraceServiceClient(cc)} + return &grpcClient{rawClient: otelgrpc.NewTraceServiceClient(cc)} } type grpcClient struct { - rawClient otlpcollectortrace.TraceServiceClient + rawClient otelgrpc.TraceServiceClient } // Export implements the Client interface. @@ -76,14 +75,14 @@ func (*UnimplementedGRPCServer) unexported() {} // RegisterGRPCServer registers the GRPCServer to the grpc.Server. 
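Aside (not part of the diff): a hedged sketch of serving OTLP traces with this gRPC surface — RegisterGRPCServer (whose body follows just below) plus an Export handler that can report a partial success via the ExportPartialSuccess fields above:

package main

import (
	"context"
	"net"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)

type tracesServer struct {
	ptraceotlp.UnimplementedGRPCServer
}

func (*tracesServer) Export(_ context.Context, req ptraceotlp.ExportRequest) (ptraceotlp.ExportResponse, error) {
	_ = req.Traces() // hand the spans to the rest of the pipeline here

	resp := ptraceotlp.NewExportResponse()
	// To report dropped data instead, fill in the partial success:
	//   resp.PartialSuccess().SetRejectedSpans(n)
	//   resp.PartialSuccess().SetErrorMessage("why")
	return resp, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:4317")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	ptraceotlp.RegisterGRPCServer(srv, &tracesServer{})
	_ = srv.Serve(lis)
}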
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { - otlpcollectortrace.RegisterTraceServiceServer(s, &rawTracesServer{srv: srv}) + otelgrpc.RegisterTraceServiceServer(s, &rawTracesServer{srv: srv}) } type rawTracesServer struct { srv GRPCServer } -func (s rawTracesServer) Export(ctx context.Context, request *otlpcollectortrace.ExportTraceServiceRequest) (*otlpcollectortrace.ExportTraceServiceResponse, error) { +func (s rawTracesServer) Export(ctx context.Context, request *internal.ExportTraceServiceRequest) (*internal.ExportTraceServiceResponse, error) { otlp.MigrateTraces(request.ResourceSpans) rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()}) return rsp.orig, err diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/request.go index 0408713c57a..618c592584f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/request.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/request.go @@ -7,7 +7,6 @@ import ( "slices" "go.opentelemetry.io/collector/pdata/internal" - otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" "go.opentelemetry.io/collector/pdata/ptrace" @@ -16,14 +15,14 @@ import ( // ExportRequest represents the request for gRPC/HTTP client/server. // It's a wrapper for ptrace.Traces data. type ExportRequest struct { - orig *otlpcollectortrace.ExportTraceServiceRequest + orig *internal.ExportTraceServiceRequest state *internal.State } // NewExportRequest returns an empty ExportRequest. func NewExportRequest() ExportRequest { return ExportRequest{ - orig: &otlpcollectortrace.ExportTraceServiceRequest{}, + orig: &internal.ExportTraceServiceRequest{}, state: internal.NewState(), } } @@ -33,22 +32,22 @@ func NewExportRequest() ExportRequest { // any changes to the provided Traces struct will be reflected in the ExportRequest and vice versa. func NewExportRequestFromTraces(td ptrace.Traces) ExportRequest { return ExportRequest{ - orig: internal.GetOrigTraces(internal.Traces(td)), - state: internal.GetTracesState(internal.Traces(td)), + orig: internal.GetTracesOrig(internal.TracesWrapper(td)), + state: internal.GetTracesState(internal.TracesWrapper(td)), } } // MarshalProto marshals ExportRequest into proto bytes. func (ms ExportRequest) MarshalProto() ([]byte, error) { - size := internal.SizeProtoOrigExportTraceServiceRequest(ms.orig) + size := ms.orig.SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportTraceServiceRequest(ms.orig, buf) + _ = ms.orig.MarshalProto(buf) return buf, nil } // UnmarshalProto unmarshalls ExportRequest from proto bytes. 
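Aside (not part of the diff): the wire methods now live on the internal request/response structs (SizeProto, MarshalProto, UnmarshalProto, and the JSON pair) instead of free internal functions, but the exported round-trip is unchanged:

package main

import (
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)

func main() {
	td := ptrace.NewTraces()
	td.ResourceSpans().AppendEmpty()

	req := ptraceotlp.NewExportRequestFromTraces(td) // shares memory with td
	buf, err := req.MarshalProto()
	if err != nil {
		panic(err)
	}

	back := ptraceotlp.NewExportRequest()
	if err := back.UnmarshalProto(buf); err != nil {
		panic(err)
	}
	_ = back.Traces() // a ptrace.Traces view over the decoded request
}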
func (ms ExportRequest) UnmarshalProto(data []byte) error { - err := internal.UnmarshalProtoOrigExportTraceServiceRequest(ms.orig, data) + err := ms.orig.UnmarshalProto(data) if err != nil { return err } @@ -60,7 +59,7 @@ func (ms ExportRequest) UnmarshalProto(data []byte) error { func (ms ExportRequest) MarshalJSON() ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportTraceServiceRequest(ms.orig, dest) + ms.orig.MarshalJSON(dest) if dest.Error() != nil { return nil, dest.Error() } @@ -71,10 +70,10 @@ func (ms ExportRequest) MarshalJSON() ([]byte, error) { func (ms ExportRequest) UnmarshalJSON(data []byte) error { iter := json.BorrowIterator(data) defer json.ReturnIterator(iter) - internal.UnmarshalJSONOrigExportTraceServiceRequest(ms.orig, iter) + ms.orig.UnmarshalJSON(iter) return iter.Error() } func (ms ExportRequest) Traces() ptrace.Traces { - return ptrace.Traces(internal.NewTraces(ms.orig, ms.state)) + return ptrace.Traces(internal.NewTracesWrapper(ms.orig, ms.state)) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go index 7285a6346d0..46264725929 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go @@ -6,28 +6,27 @@ package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceo import ( "slices" - "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/json" ) // MarshalProto marshals ExportResponse into proto bytes. func (ms ExportResponse) MarshalProto() ([]byte, error) { - size := internal.SizeProtoOrigExportTraceServiceResponse(ms.orig) + size := ms.orig.SizeProto() buf := make([]byte, size) - _ = internal.MarshalProtoOrigExportTraceServiceResponse(ms.orig, buf) + _ = ms.orig.MarshalProto(buf) return buf, nil } // UnmarshalProto unmarshalls ExportResponse from proto bytes. func (ms ExportResponse) UnmarshalProto(data []byte) error { - return internal.UnmarshalProtoOrigExportTraceServiceResponse(ms.orig, data) + return ms.orig.UnmarshalProto(data) } // MarshalJSON marshals ExportResponse into JSON bytes. func (ms ExportResponse) MarshalJSON() ([]byte, error) { dest := json.BorrowStream(nil) defer json.ReturnStream(dest) - internal.MarshalJSONOrigExportTraceServiceResponse(ms.orig, dest) + ms.orig.MarshalJSON(dest) return slices.Clone(dest.Buffer()), dest.Error() } @@ -35,6 +34,6 @@ func (ms ExportResponse) MarshalJSON() ([]byte, error) { func (ms ExportResponse) UnmarshalJSON(data []byte) error { iter := json.BorrowIterator(data) defer json.ReturnIterator(iter) - internal.UnmarshalJSONOrigExportTraceServiceResponse(ms.orig, iter) + ms.orig.UnmarshalJSON(iter) return iter.Error() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go index 561d82cfffa..bb1702ffe15 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go @@ -4,7 +4,7 @@ package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" import ( - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/internal" ) // SpanKind is the type of span. 
Can be used to specify additional relationships between spans @@ -13,25 +13,25 @@ type SpanKind int32 const ( // SpanKindUnspecified represents that the SpanKind is unspecified, it MUST NOT be used. - SpanKindUnspecified = SpanKind(otlptrace.Span_SPAN_KIND_UNSPECIFIED) + SpanKindUnspecified = SpanKind(internal.SpanKind_SPAN_KIND_UNSPECIFIED) // SpanKindInternal indicates that the span represents an internal operation within an application, // as opposed to an operation happening at the boundaries. Default value. - SpanKindInternal = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL) + SpanKindInternal = SpanKind(internal.SpanKind_SPAN_KIND_INTERNAL) // SpanKindServer indicates that the span covers server-side handling of an RPC or other // remote network request. - SpanKindServer = SpanKind(otlptrace.Span_SPAN_KIND_SERVER) + SpanKindServer = SpanKind(internal.SpanKind_SPAN_KIND_SERVER) // SpanKindClient indicates that the span describes a request to some remote service. - SpanKindClient = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT) + SpanKindClient = SpanKind(internal.SpanKind_SPAN_KIND_CLIENT) // SpanKindProducer indicates that the span describes a producer sending a message to a broker. // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship // between producer and consumer spans. // A PRODUCER span ends when the message was accepted by the broker while the logical processing of // the message might span a much longer time. - SpanKindProducer = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER) + SpanKindProducer = SpanKind(internal.SpanKind_SPAN_KIND_PRODUCER) // SpanKindConsumer indicates that the span describes consumer receiving a message from a broker. // Like the PRODUCER kind, there is often no direct critical path latency relationship between // producer and consumer spans. - SpanKindConsumer = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER) + SpanKindConsumer = SpanKind(internal.SpanKind_SPAN_KIND_CONSUMER) ) // String returns the string representation of the SpanKind. diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go index 18a21f56ba8..d1da464363e 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go @@ -4,7 +4,7 @@ package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" import ( - otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/internal" ) // StatusCode mirrors the codes defined at @@ -12,9 +12,9 @@ import ( type StatusCode int32 const ( - StatusCodeUnset = StatusCode(otlptrace.Status_STATUS_CODE_UNSET) - StatusCodeOk = StatusCode(otlptrace.Status_STATUS_CODE_OK) - StatusCodeError = StatusCode(otlptrace.Status_STATUS_CODE_ERROR) + StatusCodeUnset = StatusCode(internal.StatusCode_STATUS_CODE_UNSET) + StatusCodeOk = StatusCode(internal.StatusCode_STATUS_CODE_OK) + StatusCodeError = StatusCode(internal.StatusCode_STATUS_CODE_ERROR) ) // String returns the string representation of the StatusCode. 
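Aside (not part of the diff): the SpanKind and StatusCode constants keep their numeric values — they still cast the OTLP enum, just through internal — so both the wire format and the String() names are stable:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()
	span.SetKind(ptrace.SpanKindClient)

	st := span.Status()
	st.SetCode(ptrace.StatusCodeError)
	st.SetMessage("upstream timeout")

	fmt.Println(span.Kind(), st.Code()) // prints the enum names, e.g. Client Error
}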
diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go index 92dfcaaeea2..d5eef46c335 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go @@ -10,10 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/pprofile" ) -var ( - profileStartTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC)) - profileEndTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)) -) +var profileStartTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC)) // GenerateProfiles generates dummy profiling data for tests func GenerateProfiles(profilesCount int) pprofile.Profiles { @@ -48,21 +45,18 @@ func GenerateProfiles(profilesCount int) pprofile.Profiles { func fillProfileOne(dic pprofile.ProfilesDictionary, profile pprofile.Profile) { profile.SetProfileID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) profile.SetTime(profileStartTimestamp) - profile.SetDuration(profileEndTimestamp) + profile.SetDurationNano(uint64(time.Second.Nanoseconds())) profile.SetDroppedAttributesCount(1) - profile.PeriodType().SetAggregationTemporality(pprofile.AggregationTemporalityDelta) - - st := profile.SampleType() - st.SetAggregationTemporality(pprofile.AggregationTemporalityDelta) loc := pprofile.NewLocation() loc.SetAddress(1) - id, _ := pprofile.SetLocation(dic.LocationTable(), loc) - stack := dic.StackTable().AppendEmpty() - stack.LocationIndices().Append(id) + locID, _ := pprofile.SetLocation(dic.LocationTable(), loc) + stack := pprofile.NewStack() + stack.LocationIndices().Append(locID) + stackID, _ := pprofile.SetStack(dic.StackTable(), stack) - sample := profile.Sample().AppendEmpty() - sample.SetStackIndex(1) + sample := profile.Samples().AppendEmpty() + sample.SetStackIndex(stackID) sample.Values().Append(4) sample.AttributeIndices().Append(0) } @@ -70,20 +64,17 @@ func fillProfileOne(dic pprofile.ProfilesDictionary, profile pprofile.Profile) { func fillProfileTwo(dic pprofile.ProfilesDictionary, profile pprofile.Profile) { profile.SetProfileID([16]byte{0x02, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) profile.SetTime(profileStartTimestamp) - profile.SetDuration(profileEndTimestamp) - profile.PeriodType().SetAggregationTemporality(pprofile.AggregationTemporalityDelta) - - st := profile.SampleType() - st.SetAggregationTemporality(pprofile.AggregationTemporalityDelta) + profile.SetDurationNano(uint64(time.Second.Nanoseconds())) loc := pprofile.NewLocation() loc.SetAddress(2) - id, _ := pprofile.SetLocation(dic.LocationTable(), loc) - stack := dic.StackTable().AppendEmpty() - stack.LocationIndices().Append(id) + locID, _ := pprofile.SetLocation(dic.LocationTable(), loc) + stack := pprofile.NewStack() + stack.LocationIndices().Append(locID) + stackID, _ := pprofile.SetStack(dic.StackTable(), stack) - sample := profile.Sample().AppendEmpty() - sample.SetStackIndex(1) + sample := profile.Samples().AppendEmpty() + sample.SetStackIndex(stackID) sample.Values().Append(9) sample.AttributeIndices().Append(0) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/logs.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/logs.go index 1b4f8202254..ef19e15be23 100644 --- 
a/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/logs.go +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/logs.go @@ -13,23 +13,23 @@ import ( // MarkPipelineOwnedLogs marks the plog.Logs data as owned by the pipeline, returns true if the data were // previously not owned by the pipeline, otherwise false. func MarkPipelineOwnedLogs(ld plog.Logs) bool { - return internal.GetLogsState(internal.Logs(ld)).MarkPipelineOwned() + return internal.GetLogsState(internal.LogsWrapper(ld)).MarkPipelineOwned() } func RefLogs(ld plog.Logs) { if EnableRefCounting.IsEnabled() { - internal.GetLogsState(internal.Logs(ld)).Ref() + internal.GetLogsState(internal.LogsWrapper(ld)).Ref() } } func UnrefLogs(ld plog.Logs) { if EnableRefCounting.IsEnabled() { - if !internal.GetLogsState(internal.Logs(ld)).Unref() { + if !internal.GetLogsState(internal.LogsWrapper(ld)).Unref() { return } - // Don't call DeleteOrigExportLogsServiceRequest without the gate because we reset the data and that may still cause issues. + // Don't call DeleteExportLogsServiceRequest without the gate because we reset the data and that may still cause issues. if internal.UseProtoPooling.IsEnabled() { - internal.DeleteOrigExportLogsServiceRequest(internal.GetOrigLogs(internal.Logs(ld)), true) + internal.DeleteExportLogsServiceRequest(internal.GetLogsOrig(internal.LogsWrapper(ld)), true) } } } @@ -37,5 +37,5 @@ func UnrefLogs(ld plog.Logs) { // TODO: Generate this in pdata. func EqualLogs(ld1, ld2 plog.Logs) bool { - return reflect.DeepEqual(internal.GetOrigLogs(internal.Logs(ld1)), internal.GetOrigLogs(internal.Logs(ld2))) + return reflect.DeepEqual(internal.GetLogsOrig(internal.LogsWrapper(ld1)), internal.GetLogsOrig(internal.LogsWrapper(ld2))) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/metrics.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/metrics.go index 1bc6c3921d9..02610780dca 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/metrics.go +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/metrics.go @@ -13,23 +13,23 @@ import ( // MarkPipelineOwnedMetrics marks the pmetric.Metrics data as owned by the pipeline, returns true if the data were // previously not owned by the pipeline, otherwise false. func MarkPipelineOwnedMetrics(md pmetric.Metrics) bool { - return internal.GetMetricsState(internal.Metrics(md)).MarkPipelineOwned() + return internal.GetMetricsState(internal.MetricsWrapper(md)).MarkPipelineOwned() } func RefMetrics(md pmetric.Metrics) { if EnableRefCounting.IsEnabled() { - internal.GetMetricsState(internal.Metrics(md)).Ref() + internal.GetMetricsState(internal.MetricsWrapper(md)).Ref() } } func UnrefMetrics(md pmetric.Metrics) { if EnableRefCounting.IsEnabled() { - if !internal.GetMetricsState(internal.Metrics(md)).Unref() { + if !internal.GetMetricsState(internal.MetricsWrapper(md)).Unref() { return } - // Don't call DeleteOrigExportLogsServiceRequest without the gate because we reset the data and that may still cause issues. + // Don't call DeleteExportLogsServiceRequest without the gate because we reset the data and that may still cause issues. if internal.UseProtoPooling.IsEnabled() { - internal.DeleteOrigExportMetricsServiceRequest(internal.GetOrigMetrics(internal.Metrics(md)), true) + internal.DeleteExportMetricsServiceRequest(internal.GetMetricsOrig(internal.MetricsWrapper(md)), true) } } } @@ -37,5 +37,5 @@ func UnrefMetrics(md pmetric.Metrics) { // TODO: Generate this in pdata. 
func EqualMetrics(md1, md2 pmetric.Metrics) bool { - return reflect.DeepEqual(internal.GetOrigMetrics(internal.Metrics(md1)), internal.GetOrigMetrics(internal.Metrics(md2))) + return reflect.DeepEqual(internal.GetMetricsOrig(internal.MetricsWrapper(md1)), internal.GetMetricsOrig(internal.MetricsWrapper(md2))) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/profiles.go index 28e2d07f0dd..b4a6ad70877 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/profiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/profiles.go @@ -13,23 +13,23 @@ import ( // MarkPipelineOwnedProfiles marks the pprofile.Profiles data as owned by the pipeline, returns true if the data were // previously not owned by the pipeline, otherwise false. func MarkPipelineOwnedProfiles(pd pprofile.Profiles) bool { - return internal.GetProfilesState(internal.Profiles(pd)).MarkPipelineOwned() + return internal.GetProfilesState(internal.ProfilesWrapper(pd)).MarkPipelineOwned() } func RefProfiles(pd pprofile.Profiles) { if EnableRefCounting.IsEnabled() { - internal.GetProfilesState(internal.Profiles(pd)).Ref() + internal.GetProfilesState(internal.ProfilesWrapper(pd)).Ref() } } func UnrefProfiles(pd pprofile.Profiles) { if EnableRefCounting.IsEnabled() { - if !internal.GetProfilesState(internal.Profiles(pd)).Unref() { + if !internal.GetProfilesState(internal.ProfilesWrapper(pd)).Unref() { return } - // Don't call DeleteOrigExportLogsServiceRequest without the gate because we reset the data and that may still cause issues. + // Don't call DeleteExportLogsServiceRequest without the gate because we reset the data and that may still cause issues. if internal.UseProtoPooling.IsEnabled() { - internal.DeleteOrigExportProfilesServiceRequest(internal.GetOrigProfiles(internal.Profiles(pd)), true) + internal.DeleteExportProfilesServiceRequest(internal.GetProfilesOrig(internal.ProfilesWrapper(pd)), true) } } } @@ -37,5 +37,5 @@ func UnrefProfiles(pd pprofile.Profiles) { // TODO: Generate this in pdata. func EqualProfiles(pd1, pd2 pprofile.Profiles) bool { - return reflect.DeepEqual(internal.GetOrigProfiles(internal.Profiles(pd1)), internal.GetOrigProfiles(internal.Profiles(pd2))) + return reflect.DeepEqual(internal.GetProfilesOrig(internal.ProfilesWrapper(pd1)), internal.GetProfilesOrig(internal.ProfilesWrapper(pd2))) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/traces.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/traces.go index 539951fcf7c..5f21b6a2e0f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/traces.go +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/pref/traces.go @@ -13,21 +13,21 @@ import ( // MarkPipelineOwnedTraces marks the ptrace.Traces data as owned by the pipeline, returns true if the data were // previously not owned by the pipeline, otherwise false. 
func MarkPipelineOwnedTraces(td ptrace.Traces) bool { - return internal.GetTracesState(internal.Traces(td)).MarkPipelineOwned() + return internal.GetTracesState(internal.TracesWrapper(td)).MarkPipelineOwned() } func RefTraces(td ptrace.Traces) { - internal.GetTracesState(internal.Traces(td)).Ref() + internal.GetTracesState(internal.TracesWrapper(td)).Ref() } func UnrefTraces(td ptrace.Traces) { if EnableRefCounting.IsEnabled() { - if !internal.GetTracesState(internal.Traces(td)).Unref() { + if !internal.GetTracesState(internal.TracesWrapper(td)).Unref() { return } - // Don't call DeleteOrigExportLogsServiceRequest without the gate because we reset the data and that may still cause issues. + // Don't call DeleteExportLogsServiceRequest without the gate because we reset the data and that may still cause issues. if internal.UseProtoPooling.IsEnabled() { - internal.DeleteOrigExportTraceServiceRequest(internal.GetOrigTraces(internal.Traces(td)), true) + internal.DeleteExportTraceServiceRequest(internal.GetTracesOrig(internal.TracesWrapper(td)), true) } } } @@ -35,5 +35,5 @@ func UnrefTraces(td ptrace.Traces) { // TODO: Generate this in pdata. func EqualTraces(td1, td2 ptrace.Traces) bool { - return reflect.DeepEqual(internal.GetOrigTraces(internal.Traces(td1)), internal.GetOrigTraces(internal.Traces(td2))) + return reflect.DeepEqual(internal.GetTracesOrig(internal.TracesWrapper(td1)), internal.GetTracesOrig(internal.TracesWrapper(td2))) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/context.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/context.go index 7ea1df2cfb7..eaa027bb685 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/context.go +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/context.go @@ -10,17 +10,16 @@ import ( "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/client" - otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - "go.opentelemetry.io/collector/pdata/xpdata/request/internal" + "go.opentelemetry.io/collector/pdata/internal" ) // encodeContext encodes the context into a map of strings. 
-func encodeContext(ctx context.Context) internal.RequestContext { +func encodeContext(ctx context.Context) *internal.RequestContext { rc := internal.RequestContext{} encodeSpanContext(ctx, &rc) encodeClientMetadata(ctx, &rc) encodeClientAddress(ctx, &rc) - return rc + return &rc } func encodeSpanContext(ctx context.Context, rc *internal.RequestContext) { @@ -28,11 +27,9 @@ func encodeSpanContext(ctx context.Context, rc *internal.RequestContext) { if !spanCtx.IsValid() { return } - traceID := spanCtx.TraceID() - spanID := spanCtx.SpanID() rc.SpanContext = &internal.SpanContext{ - TraceId: traceID[:], - SpanId: spanID[:], + TraceID: internal.TraceID(spanCtx.TraceID()), + SpanID: internal.SpanID(spanCtx.SpanID()), TraceFlags: uint32(spanCtx.TraceFlags()), TraceState: spanCtx.TraceState().String(), Remote: spanCtx.IsRemote(), @@ -45,18 +42,18 @@ func encodeClientMetadata(ctx context.Context, rc *internal.RequestContext) { vals := clientMetadata.Get(k) switch len(vals) { case 1: - rc.ClientMetadata = append(rc.ClientMetadata, otlpcommon.KeyValue{ + rc.ClientMetadata = append(rc.ClientMetadata, internal.KeyValue{ Key: k, - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: vals[0]}}, + Value: internal.AnyValue{Value: &internal.AnyValue_StringValue{StringValue: vals[0]}}, }) default: - metadataArray := make([]otlpcommon.AnyValue, 0, len(vals)) + metadataArray := make([]internal.AnyValue, 0, len(vals)) for i := range vals { - metadataArray = append(metadataArray, otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: vals[i]}}) + metadataArray = append(metadataArray, internal.AnyValue{Value: &internal.AnyValue_StringValue{StringValue: vals[i]}}) } - rc.ClientMetadata = append(rc.ClientMetadata, otlpcommon.KeyValue{ + rc.ClientMetadata = append(rc.ClientMetadata, internal.KeyValue{ Key: k, - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{Values: metadataArray}}}, + Value: internal.AnyValue{Value: &internal.AnyValue_ArrayValue{ArrayValue: &internal.ArrayValue{Values: metadataArray}}}, }) } } @@ -65,19 +62,19 @@ func encodeClientMetadata(ctx context.Context, rc *internal.RequestContext) { func encodeClientAddress(ctx context.Context, rc *internal.RequestContext) { switch a := client.FromContext(ctx).Addr.(type) { case *net.IPAddr: - rc.ClientAddress = &internal.RequestContext_Ip{Ip: &internal.IPAddr{ - Ip: a.IP, + rc.ClientAddress = &internal.RequestContext_IP{IP: &internal.IPAddr{ + IP: a.IP, Zone: a.Zone, }} case *net.TCPAddr: - rc.ClientAddress = &internal.RequestContext_Tcp{Tcp: &internal.TCPAddr{ - Ip: a.IP, + rc.ClientAddress = &internal.RequestContext_TCP{TCP: &internal.TCPAddr{ + IP: a.IP, Port: int64(a.Port), Zone: a.Zone, }} case *net.UDPAddr: - rc.ClientAddress = &internal.RequestContext_Udp{Udp: &internal.UDPAddr{ - Ip: a.IP, + rc.ClientAddress = &internal.RequestContext_UDP{UDP: &internal.UDPAddr{ + IP: a.IP, Port: int64(a.Port), Zone: a.Zone, }} @@ -110,10 +107,8 @@ func decodeSpanContext(ctx context.Context, sc *internal.SpanContext) context.Co if sc == nil { return ctx } - traceID := trace.TraceID{} - copy(traceID[:], sc.TraceId) - spanID := trace.SpanID{} - copy(spanID[:], sc.SpanId) + traceID := trace.TraceID(sc.TraceID) + spanID := trace.SpanID(sc.SpanID) traceState, _ := trace.ParseTraceState(sc.TraceState) return trace.ContextWithSpanContext(context.Background(), trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, @@ -124,17 +119,17 @@ func decodeSpanContext(ctx 
context.Context, sc *internal.SpanContext) context.Co })) } -func decodeClientMetadata(clientMetadata []otlpcommon.KeyValue) map[string][]string { +func decodeClientMetadata(clientMetadata []internal.KeyValue) map[string][]string { if len(clientMetadata) == 0 { return nil } metadataMap := make(map[string][]string, len(clientMetadata)) for _, kv := range clientMetadata { switch val := kv.Value.Value.(type) { - case *otlpcommon.AnyValue_StringValue: + case *internal.AnyValue_StringValue: metadataMap[kv.Key] = make([]string, 1) metadataMap[kv.Key][0] = val.StringValue - case *otlpcommon.AnyValue_ArrayValue: + case *internal.AnyValue_ArrayValue: metadataMap[kv.Key] = make([]string, len(val.ArrayValue.Values)) for i, v := range val.ArrayValue.Values { metadataMap[kv.Key][i] = v.GetStringValue() @@ -146,22 +141,22 @@ func decodeClientMetadata(clientMetadata []otlpcommon.KeyValue) map[string][]str func decodeClientAddress(rc *internal.RequestContext) net.Addr { switch a := rc.ClientAddress.(type) { - case *internal.RequestContext_Ip: + case *internal.RequestContext_IP: return &net.IPAddr{ - IP: a.Ip.Ip, - Zone: a.Ip.Zone, + IP: a.IP.IP, + Zone: a.IP.Zone, } - case *internal.RequestContext_Tcp: + case *internal.RequestContext_TCP: return &net.TCPAddr{ - IP: a.Tcp.Ip, - Port: int(a.Tcp.Port), - Zone: a.Tcp.Zone, + IP: a.TCP.IP, + Port: int(a.TCP.Port), + Zone: a.TCP.Zone, } - case *internal.RequestContext_Udp: + case *internal.RequestContext_UDP: return &net.UDPAddr{ - IP: a.Udp.Ip, - Port: int(a.Udp.Port), - Zone: a.Udp.Zone, + IP: a.UDP.IP, + Port: int(a.UDP.Port), + Zone: a.UDP.Zone, } case *internal.RequestContext_Unix: return &net.UnixAddr{ diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/internal/request.pb.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/internal/request.pb.go deleted file mode 100644 index 524ffee0255..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/internal/request.pb.go +++ /dev/null @@ -1,3167 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: pdata/xpdata/request/internal/request.proto - -package internal - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - v13 "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" - v12 "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" - v1development "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" - v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// SpanContext represents a span context encoded associated with a telemetry export request. 
-type SpanContext struct { - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - TraceFlags uint32 `protobuf:"fixed32,3,opt,name=trace_flags,json=traceFlags,proto3" json:"trace_flags,omitempty"` - TraceState string `protobuf:"bytes,4,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - Remote bool `protobuf:"varint,5,opt,name=remote,proto3" json:"remote,omitempty"` -} - -func (m *SpanContext) Reset() { *m = SpanContext{} } -func (m *SpanContext) String() string { return proto.CompactTextString(m) } -func (*SpanContext) ProtoMessage() {} -func (*SpanContext) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{0} -} -func (m *SpanContext) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SpanContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SpanContext.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SpanContext) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpanContext.Merge(m, src) -} -func (m *SpanContext) XXX_Size() int { - return m.Size() -} -func (m *SpanContext) XXX_DiscardUnknown() { - xxx_messageInfo_SpanContext.DiscardUnknown(m) -} - -var xxx_messageInfo_SpanContext proto.InternalMessageInfo - -func (m *SpanContext) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *SpanContext) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *SpanContext) GetTraceFlags() uint32 { - if m != nil { - return m.TraceFlags - } - return 0 -} - -func (m *SpanContext) GetTraceState() string { - if m != nil { - return m.TraceState - } - return "" -} - -func (m *SpanContext) GetRemote() bool { - if m != nil { - return m.Remote - } - return false -} - -type IPAddr struct { - Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` -} - -func (m *IPAddr) Reset() { *m = IPAddr{} } -func (m *IPAddr) String() string { return proto.CompactTextString(m) } -func (*IPAddr) ProtoMessage() {} -func (*IPAddr) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{1} -} -func (m *IPAddr) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPAddr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IPAddr.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IPAddr) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddr.Merge(m, src) -} -func (m *IPAddr) XXX_Size() int { - return m.Size() -} -func (m *IPAddr) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddr.DiscardUnknown(m) -} - -var xxx_messageInfo_IPAddr proto.InternalMessageInfo - -func (m *IPAddr) GetIp() []byte { - if m != nil { - return m.Ip - } - return nil -} - -func (m *IPAddr) GetZone() string { - if m != nil { - return m.Zone - } - return "" -} - -type TCPAddr struct { - Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` - Port int64 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - Zone string `protobuf:"bytes,3,opt,name=zone,proto3" 
json:"zone,omitempty"` -} - -func (m *TCPAddr) Reset() { *m = TCPAddr{} } -func (m *TCPAddr) String() string { return proto.CompactTextString(m) } -func (*TCPAddr) ProtoMessage() {} -func (*TCPAddr) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{2} -} -func (m *TCPAddr) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TCPAddr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TCPAddr.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TCPAddr) XXX_Merge(src proto.Message) { - xxx_messageInfo_TCPAddr.Merge(m, src) -} -func (m *TCPAddr) XXX_Size() int { - return m.Size() -} -func (m *TCPAddr) XXX_DiscardUnknown() { - xxx_messageInfo_TCPAddr.DiscardUnknown(m) -} - -var xxx_messageInfo_TCPAddr proto.InternalMessageInfo - -func (m *TCPAddr) GetIp() []byte { - if m != nil { - return m.Ip - } - return nil -} - -func (m *TCPAddr) GetPort() int64 { - if m != nil { - return m.Port - } - return 0 -} - -func (m *TCPAddr) GetZone() string { - if m != nil { - return m.Zone - } - return "" -} - -type UDPAddr struct { - Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` - Port int64 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - Zone string `protobuf:"bytes,3,opt,name=zone,proto3" json:"zone,omitempty"` -} - -func (m *UDPAddr) Reset() { *m = UDPAddr{} } -func (m *UDPAddr) String() string { return proto.CompactTextString(m) } -func (*UDPAddr) ProtoMessage() {} -func (*UDPAddr) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{3} -} -func (m *UDPAddr) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UDPAddr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UDPAddr.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UDPAddr) XXX_Merge(src proto.Message) { - xxx_messageInfo_UDPAddr.Merge(m, src) -} -func (m *UDPAddr) XXX_Size() int { - return m.Size() -} -func (m *UDPAddr) XXX_DiscardUnknown() { - xxx_messageInfo_UDPAddr.DiscardUnknown(m) -} - -var xxx_messageInfo_UDPAddr proto.InternalMessageInfo - -func (m *UDPAddr) GetIp() []byte { - if m != nil { - return m.Ip - } - return nil -} - -func (m *UDPAddr) GetPort() int64 { - if m != nil { - return m.Port - } - return 0 -} - -func (m *UDPAddr) GetZone() string { - if m != nil { - return m.Zone - } - return "" -} - -type UnixAddr struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Net string `protobuf:"bytes,2,opt,name=net,proto3" json:"net,omitempty"` -} - -func (m *UnixAddr) Reset() { *m = UnixAddr{} } -func (m *UnixAddr) String() string { return proto.CompactTextString(m) } -func (*UnixAddr) ProtoMessage() {} -func (*UnixAddr) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{4} -} -func (m *UnixAddr) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UnixAddr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UnixAddr.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UnixAddr) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_UnixAddr.Merge(m, src) -} -func (m *UnixAddr) XXX_Size() int { - return m.Size() -} -func (m *UnixAddr) XXX_DiscardUnknown() { - xxx_messageInfo_UnixAddr.DiscardUnknown(m) -} - -var xxx_messageInfo_UnixAddr proto.InternalMessageInfo - -func (m *UnixAddr) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *UnixAddr) GetNet() string { - if m != nil { - return m.Net - } - return "" -} - -// RequestContext represents metadata associated with a telemetry export request. -type RequestContext struct { - SpanContext *SpanContext `protobuf:"bytes,1,opt,name=span_context,json=spanContext,proto3" json:"span_context,omitempty"` - // ClientMetadata contains additional metadata about the client making the request. - ClientMetadata []v1.KeyValue `protobuf:"bytes,2,rep,name=client_metadata,json=clientMetadata,proto3" json:"client_metadata"` - // ClientAddress contains the address of the client making the request. - // - // Types that are valid to be assigned to ClientAddress: - // *RequestContext_Ip - // *RequestContext_Tcp - // *RequestContext_Udp - // *RequestContext_Unix - ClientAddress isRequestContext_ClientAddress `protobuf_oneof:"client_address"` -} - -func (m *RequestContext) Reset() { *m = RequestContext{} } -func (m *RequestContext) String() string { return proto.CompactTextString(m) } -func (*RequestContext) ProtoMessage() {} -func (*RequestContext) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{5} -} -func (m *RequestContext) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestContext.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestContext) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestContext.Merge(m, src) -} -func (m *RequestContext) XXX_Size() int { - return m.Size() -} -func (m *RequestContext) XXX_DiscardUnknown() { - xxx_messageInfo_RequestContext.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestContext proto.InternalMessageInfo - -type isRequestContext_ClientAddress interface { - isRequestContext_ClientAddress() - MarshalTo([]byte) (int, error) - Size() int -} - -type RequestContext_Ip struct { - Ip *IPAddr `protobuf:"bytes,3,opt,name=ip,proto3,oneof" json:"ip,omitempty"` -} -type RequestContext_Tcp struct { - Tcp *TCPAddr `protobuf:"bytes,4,opt,name=tcp,proto3,oneof" json:"tcp,omitempty"` -} -type RequestContext_Udp struct { - Udp *UDPAddr `protobuf:"bytes,5,opt,name=udp,proto3,oneof" json:"udp,omitempty"` -} -type RequestContext_Unix struct { - Unix *UnixAddr `protobuf:"bytes,6,opt,name=unix,proto3,oneof" json:"unix,omitempty"` -} - -func (*RequestContext_Ip) isRequestContext_ClientAddress() {} -func (*RequestContext_Tcp) isRequestContext_ClientAddress() {} -func (*RequestContext_Udp) isRequestContext_ClientAddress() {} -func (*RequestContext_Unix) isRequestContext_ClientAddress() {} - -func (m *RequestContext) GetClientAddress() isRequestContext_ClientAddress { - if m != nil { - return m.ClientAddress - } - return nil -} - -func (m *RequestContext) GetSpanContext() *SpanContext { - if m != nil { - return m.SpanContext - } - return nil -} - -func (m *RequestContext) GetClientMetadata() []v1.KeyValue { - if m != nil { - return m.ClientMetadata - } - return nil -} - -func (m *RequestContext) GetIp() *IPAddr { - if x, ok := 
m.GetClientAddress().(*RequestContext_Ip); ok { - return x.Ip - } - return nil -} - -func (m *RequestContext) GetTcp() *TCPAddr { - if x, ok := m.GetClientAddress().(*RequestContext_Tcp); ok { - return x.Tcp - } - return nil -} - -func (m *RequestContext) GetUdp() *UDPAddr { - if x, ok := m.GetClientAddress().(*RequestContext_Udp); ok { - return x.Udp - } - return nil -} - -func (m *RequestContext) GetUnix() *UnixAddr { - if x, ok := m.GetClientAddress().(*RequestContext_Unix); ok { - return x.Unix - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*RequestContext) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*RequestContext_Ip)(nil), - (*RequestContext_Tcp)(nil), - (*RequestContext_Udp)(nil), - (*RequestContext_Unix)(nil), - } -} - -type TracesRequest struct { - FormatVersion uint32 `protobuf:"fixed32,1,opt,name=format_version,json=formatVersion,proto3" json:"format_version,omitempty"` - RequestContext *RequestContext `protobuf:"bytes,2,opt,name=request_context,json=requestContext,proto3" json:"request_context,omitempty"` - TracesData *v11.TracesData `protobuf:"bytes,3,opt,name=traces_data,json=tracesData,proto3" json:"traces_data,omitempty"` -} - -func (m *TracesRequest) Reset() { *m = TracesRequest{} } -func (m *TracesRequest) String() string { return proto.CompactTextString(m) } -func (*TracesRequest) ProtoMessage() {} -func (*TracesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{6} -} -func (m *TracesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TracesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TracesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TracesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TracesRequest.Merge(m, src) -} -func (m *TracesRequest) XXX_Size() int { - return m.Size() -} -func (m *TracesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TracesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_TracesRequest proto.InternalMessageInfo - -func (m *TracesRequest) GetFormatVersion() uint32 { - if m != nil { - return m.FormatVersion - } - return 0 -} - -func (m *TracesRequest) GetRequestContext() *RequestContext { - if m != nil { - return m.RequestContext - } - return nil -} - -func (m *TracesRequest) GetTracesData() *v11.TracesData { - if m != nil { - return m.TracesData - } - return nil -} - -type MetricsRequest struct { - FormatVersion uint32 `protobuf:"fixed32,1,opt,name=format_version,json=formatVersion,proto3" json:"format_version,omitempty"` - RequestContext *RequestContext `protobuf:"bytes,2,opt,name=request_context,json=requestContext,proto3" json:"request_context,omitempty"` - MetricsData *v12.MetricsData `protobuf:"bytes,3,opt,name=metrics_data,json=metricsData,proto3" json:"metrics_data,omitempty"` -} - -func (m *MetricsRequest) Reset() { *m = MetricsRequest{} } -func (m *MetricsRequest) String() string { return proto.CompactTextString(m) } -func (*MetricsRequest) ProtoMessage() {} -func (*MetricsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{7} -} -func (m *MetricsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_MetricsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsRequest.Merge(m, src) -} -func (m *MetricsRequest) XXX_Size() int { - return m.Size() -} -func (m *MetricsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsRequest proto.InternalMessageInfo - -func (m *MetricsRequest) GetFormatVersion() uint32 { - if m != nil { - return m.FormatVersion - } - return 0 -} - -func (m *MetricsRequest) GetRequestContext() *RequestContext { - if m != nil { - return m.RequestContext - } - return nil -} - -func (m *MetricsRequest) GetMetricsData() *v12.MetricsData { - if m != nil { - return m.MetricsData - } - return nil -} - -type LogsRequest struct { - FormatVersion uint32 `protobuf:"fixed32,1,opt,name=format_version,json=formatVersion,proto3" json:"format_version,omitempty"` - RequestContext *RequestContext `protobuf:"bytes,2,opt,name=request_context,json=requestContext,proto3" json:"request_context,omitempty"` - LogsData *v13.LogsData `protobuf:"bytes,3,opt,name=logs_data,json=logsData,proto3" json:"logs_data,omitempty"` -} - -func (m *LogsRequest) Reset() { *m = LogsRequest{} } -func (m *LogsRequest) String() string { return proto.CompactTextString(m) } -func (*LogsRequest) ProtoMessage() {} -func (*LogsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{8} -} -func (m *LogsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LogsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LogsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LogsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogsRequest.Merge(m, src) -} -func (m *LogsRequest) XXX_Size() int { - return m.Size() -} -func (m *LogsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogsRequest proto.InternalMessageInfo - -func (m *LogsRequest) GetFormatVersion() uint32 { - if m != nil { - return m.FormatVersion - } - return 0 -} - -func (m *LogsRequest) GetRequestContext() *RequestContext { - if m != nil { - return m.RequestContext - } - return nil -} - -func (m *LogsRequest) GetLogsData() *v13.LogsData { - if m != nil { - return m.LogsData - } - return nil -} - -type ProfilesRequest struct { - FormatVersion uint32 `protobuf:"fixed32,1,opt,name=format_version,json=formatVersion,proto3" json:"format_version,omitempty"` - RequestContext *RequestContext `protobuf:"bytes,2,opt,name=request_context,json=requestContext,proto3" json:"request_context,omitempty"` - ProfilesData *v1development.ProfilesData `protobuf:"bytes,3,opt,name=profiles_data,json=profilesData,proto3" json:"profiles_data,omitempty"` -} - -func (m *ProfilesRequest) Reset() { *m = ProfilesRequest{} } -func (m *ProfilesRequest) String() string { return proto.CompactTextString(m) } -func (*ProfilesRequest) ProtoMessage() {} -func (*ProfilesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b47c551a6764db21, []int{9} -} -func (m *ProfilesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProfilesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) 
{ - if deterministic { - return xxx_messageInfo_ProfilesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProfilesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProfilesRequest.Merge(m, src) -} -func (m *ProfilesRequest) XXX_Size() int { - return m.Size() -} -func (m *ProfilesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ProfilesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ProfilesRequest proto.InternalMessageInfo - -func (m *ProfilesRequest) GetFormatVersion() uint32 { - if m != nil { - return m.FormatVersion - } - return 0 -} - -func (m *ProfilesRequest) GetRequestContext() *RequestContext { - if m != nil { - return m.RequestContext - } - return nil -} - -func (m *ProfilesRequest) GetProfilesData() *v1development.ProfilesData { - if m != nil { - return m.ProfilesData - } - return nil -} - -func init() { - proto.RegisterType((*SpanContext)(nil), "opentelemetry.collector.pdata.xpdata.internal.SpanContext") - proto.RegisterType((*IPAddr)(nil), "opentelemetry.collector.pdata.xpdata.internal.IPAddr") - proto.RegisterType((*TCPAddr)(nil), "opentelemetry.collector.pdata.xpdata.internal.TCPAddr") - proto.RegisterType((*UDPAddr)(nil), "opentelemetry.collector.pdata.xpdata.internal.UDPAddr") - proto.RegisterType((*UnixAddr)(nil), "opentelemetry.collector.pdata.xpdata.internal.UnixAddr") - proto.RegisterType((*RequestContext)(nil), "opentelemetry.collector.pdata.xpdata.internal.RequestContext") - proto.RegisterType((*TracesRequest)(nil), "opentelemetry.collector.pdata.xpdata.internal.TracesRequest") - proto.RegisterType((*MetricsRequest)(nil), "opentelemetry.collector.pdata.xpdata.internal.MetricsRequest") - proto.RegisterType((*LogsRequest)(nil), "opentelemetry.collector.pdata.xpdata.internal.LogsRequest") - proto.RegisterType((*ProfilesRequest)(nil), "opentelemetry.collector.pdata.xpdata.internal.ProfilesRequest") -} - -func init() { - proto.RegisterFile("pdata/xpdata/request/internal/request.proto", fileDescriptor_b47c551a6764db21) -} - -var fileDescriptor_b47c551a6764db21 = []byte{ - // 783 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0x8d, 0x93, 0x90, 0x8f, 0x75, 0x9a, 0x56, 0x16, 0x82, 0xd0, 0x43, 0x1a, 0x59, 0x2a, 0x44, - 0xb4, 0x38, 0xa4, 0x15, 0x9f, 0x12, 0x87, 0xa6, 0x15, 0x34, 0x40, 0x50, 0xe5, 0x7e, 0x1c, 0x90, - 0xaa, 0x68, 0xb1, 0x37, 0x91, 0x25, 0x7b, 0xd7, 0xd8, 0x9b, 0x28, 0xe5, 0x1f, 0x70, 0xe3, 0xce, - 0x1f, 0xea, 0xb1, 0x47, 0x4e, 0xa8, 0x6a, 0x4f, 0x9c, 0xf8, 0x09, 0xa0, 0x9d, 0xdd, 0xa4, 0x49, - 0x65, 0x0e, 0x11, 0x97, 0x9e, 0x32, 0x3b, 0x7e, 0xf3, 0x3c, 0x6f, 0xf7, 0x79, 0x27, 0x68, 0x2d, - 0x74, 0x31, 0xc7, 0x8d, 0x91, 0xfc, 0x89, 0xc8, 0xe7, 0x01, 0x89, 0x79, 0xc3, 0xa3, 0x9c, 0x44, - 0x14, 0xfb, 0xe3, 0x84, 0x15, 0x46, 0x8c, 0x33, 0xe3, 0x11, 0x0b, 0x09, 0xe5, 0xc4, 0x27, 0x01, - 0xe1, 0xd1, 0x89, 0xe5, 0x30, 0xdf, 0x27, 0x0e, 0x67, 0x91, 0x05, 0xd5, 0x96, 0x24, 0xb1, 0xc6, - 0xc5, 0xcb, 0xb7, 0xfb, 0xac, 0xcf, 0xa0, 0xb2, 0x21, 0x22, 0x49, 0xb2, 0x5c, 0x9f, 0x21, 0x69, - 0xc8, 0xe7, 0x3c, 0xc2, 0x0e, 0x69, 0x0c, 0x9b, 0x32, 0x50, 0xc8, 0xf5, 0x24, 0xa4, 0x88, 0x3d, - 0x27, 0x16, 0x58, 0x15, 0x2a, 0xf4, 0xfd, 0x24, 0xb4, 0xcf, 0xfa, 0x00, 0x15, 0xbf, 0x0a, 0xf7, - 0x30, 0x09, 0xe7, 0xb0, 0x20, 0x60, 0x54, 0x20, 0x65, 0xa4, 0xb0, 0x2f, 0x92, 0xb0, 0x61, 0xc4, - 0x7a, 0x9e, 0x4f, 0x04, 0xaf, 0x4b, 0x86, 0xc4, 0x67, 0x61, 0x40, 
0x28, 0x9f, 0xa4, 0x65, 0xa9, - 0xf9, 0x5d, 0x43, 0xfa, 0x7e, 0x88, 0xe9, 0x36, 0xa3, 0x9c, 0x8c, 0xb8, 0x71, 0x0f, 0x15, 0x40, - 0x5b, 0xd7, 0x73, 0x2b, 0x5a, 0x4d, 0xab, 0x97, 0xec, 0x3c, 0xac, 0xdb, 0xae, 0x71, 0x17, 0xe5, - 0xe3, 0x10, 0x53, 0xf1, 0x24, 0x0d, 0x4f, 0x72, 0x62, 0xd9, 0x76, 0x8d, 0x15, 0xa4, 0xcb, 0x9a, - 0x9e, 0x8f, 0xfb, 0x71, 0x25, 0x53, 0xd3, 0xea, 0x79, 0x1b, 0x41, 0xea, 0xb5, 0xc8, 0x5c, 0x01, - 0x62, 0x8e, 0x39, 0xa9, 0x64, 0x6b, 0x5a, 0xbd, 0xa8, 0x00, 0xfb, 0x22, 0x63, 0xdc, 0x41, 0xb9, - 0x88, 0x04, 0x8c, 0x93, 0xca, 0xad, 0x9a, 0x56, 0x2f, 0xd8, 0x6a, 0x65, 0xae, 0xa3, 0x5c, 0x7b, - 0x6f, 0xcb, 0x75, 0x23, 0xa3, 0x8c, 0xd2, 0x5e, 0xa8, 0x3a, 0x4a, 0x7b, 0xa1, 0x61, 0xa0, 0xec, - 0x17, 0x46, 0x09, 0x74, 0x52, 0xb4, 0x21, 0x36, 0xb7, 0x50, 0xfe, 0x60, 0xfb, 0x9f, 0xf0, 0x90, - 0x45, 0x1c, 0xe0, 0x19, 0x1b, 0xe2, 0x09, 0x45, 0x66, 0x96, 0xe2, 0x70, 0xe7, 0xff, 0x28, 0x1e, - 0xa3, 0xc2, 0x21, 0xf5, 0x46, 0xc0, 0x61, 0xa0, 0x2c, 0xc5, 0x01, 0x01, 0x96, 0xa2, 0x0d, 0xb1, - 0xb1, 0x84, 0x32, 0x94, 0x70, 0xd5, 0xb8, 0x08, 0xcd, 0xaf, 0x59, 0x54, 0xb6, 0xa5, 0x83, 0xc7, - 0xc7, 0x70, 0x8c, 0x4a, 0xb0, 0xd7, 0x8e, 0x5c, 0x03, 0x81, 0xbe, 0xf1, 0xd2, 0x9a, 0xcb, 0xd9, - 0xd6, 0xd4, 0xc1, 0xda, 0x7a, 0x3c, 0x75, 0xca, 0x47, 0x68, 0xd1, 0xf1, 0x3d, 0x42, 0x79, 0x37, - 0x20, 0x1c, 0x8b, 0xa2, 0x4a, 0xba, 0x96, 0xa9, 0xeb, 0x1b, 0x0f, 0xae, 0xbd, 0x01, 0x4c, 0x62, - 0x29, 0xb3, 0x0d, 0x9b, 0xd6, 0x3b, 0x72, 0x72, 0x84, 0xfd, 0x01, 0x69, 0x65, 0x4f, 0x7f, 0xae, - 0xa4, 0xec, 0xb2, 0x64, 0xe9, 0x28, 0x12, 0xe3, 0x0d, 0xec, 0x59, 0x06, 0x9a, 0x7d, 0x32, 0x67, - 0xb3, 0xf2, 0xa0, 0x77, 0x53, 0xb0, 0xd9, 0x6f, 0x51, 0x86, 0x3b, 0x21, 0x38, 0x45, 0xdf, 0x78, - 0x3a, 0x27, 0x93, 0x32, 0xc1, 0x6e, 0xca, 0x16, 0x24, 0x82, 0x6b, 0xe0, 0x86, 0xe0, 0xac, 0xf9, - 0xb9, 0x94, 0x1b, 0x04, 0xd7, 0xc0, 0x0d, 0x8d, 0x0e, 0xca, 0x0e, 0xa8, 0x37, 0xaa, 0xe4, 0x80, - 0xec, 0xd9, 0xbc, 0x64, 0xca, 0x17, 0xbb, 0x29, 0x1b, 0x68, 0x5a, 0x4b, 0x48, 0xed, 0x60, 0x17, - 0xbb, 0x6e, 0x44, 0xe2, 0xd8, 0xfc, 0xa5, 0xa1, 0x85, 0x03, 0xf1, 0x61, 0xc4, 0xca, 0x11, 0xc6, - 0x2a, 0x2a, 0xf7, 0x58, 0x14, 0x60, 0xde, 0x1d, 0x92, 0x28, 0xf6, 0x18, 0x05, 0x33, 0xe4, 0xed, - 0x05, 0x99, 0x3d, 0x92, 0x49, 0xa3, 0x87, 0x16, 0xd5, 0x2d, 0x38, 0x31, 0x4d, 0x1a, 0x9a, 0x7c, - 0x35, 0x67, 0x93, 0xb3, 0x4e, 0xb4, 0xcb, 0xd1, 0xac, 0x33, 0xdb, 0xea, 0x5b, 0x8e, 0xbb, 0x60, - 0x1b, 0x79, 0xd6, 0xf5, 0x44, 0xdb, 0xc8, 0x4b, 0x72, 0xd8, 0xb4, 0xa4, 0xa0, 0x1d, 0xcc, 0xb1, - 0xfa, 0xea, 0x21, 0x36, 0x7f, 0x6b, 0xa8, 0xdc, 0x91, 0x97, 0xe3, 0x0d, 0x15, 0xfb, 0x01, 0x95, - 0xd4, 0xed, 0x3d, 0xad, 0x76, 0x2d, 0x51, 0xed, 0xf8, 0x9a, 0x1f, 0x36, 0x2d, 0x25, 0x0a, 0x04, - 0xeb, 0xc1, 0xd5, 0xc2, 0x3c, 0xd7, 0x90, 0xfe, 0x9e, 0xf5, 0x6f, 0xaa, 0xdc, 0x16, 0x2a, 0x8a, - 0x09, 0x34, 0xad, 0x75, 0x35, 0x51, 0x2b, 0xcc, 0xa9, 0x61, 0xd3, 0x12, 0x5a, 0x40, 0x65, 0xc1, - 0x57, 0x91, 0xf9, 0x47, 0x43, 0x8b, 0x7b, 0x6a, 0xc6, 0xdc, 0x50, 0x99, 0xc7, 0x68, 0x61, 0x3c, - 0x05, 0xa7, 0xa5, 0x3e, 0x4f, 0x94, 0x3a, 0x99, 0x97, 0x33, 0x63, 0xd4, 0x1a, 0x4b, 0x04, 0xf5, - 0xa5, 0x70, 0x6a, 0xd5, 0xea, 0x9c, 0x5e, 0x54, 0xb5, 0xb3, 0x8b, 0xaa, 0x76, 0x7e, 0x51, 0xd5, - 0xbe, 0x5d, 0x56, 0x53, 0x67, 0x97, 0xd5, 0xd4, 0x8f, 0xcb, 0x6a, 0xea, 0xe3, 0x66, 0x9f, 0x5d, - 0x7b, 0x87, 0x27, 0x66, 0xba, 0x12, 0xd3, 0x98, 0xf9, 0x8f, 0x33, 0x16, 0xf3, 0x29, 0x07, 0x7d, - 0x6c, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x46, 0x4d, 0x98, 0x93, 0x03, 0x09, 0x00, 0x00, -} - -func (m *SpanContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SpanContext) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SpanContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Remote { - i-- - if m.Remote { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.TraceState) > 0 { - i -= len(m.TraceState) - copy(dAtA[i:], m.TraceState) - i = encodeVarintRequest(dAtA, i, uint64(len(m.TraceState))) - i-- - dAtA[i] = 0x22 - } - if m.TraceFlags != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.TraceFlags)) - i-- - dAtA[i] = 0x1d - } - if len(m.SpanId) > 0 { - i -= len(m.SpanId) - copy(dAtA[i:], m.SpanId) - i = encodeVarintRequest(dAtA, i, uint64(len(m.SpanId))) - i-- - dAtA[i] = 0x12 - } - if len(m.TraceId) > 0 { - i -= len(m.TraceId) - copy(dAtA[i:], m.TraceId) - i = encodeVarintRequest(dAtA, i, uint64(len(m.TraceId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *IPAddr) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPAddr) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPAddr) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Zone) > 0 { - i -= len(m.Zone) - copy(dAtA[i:], m.Zone) - i = encodeVarintRequest(dAtA, i, uint64(len(m.Zone))) - i-- - dAtA[i] = 0x12 - } - if len(m.Ip) > 0 { - i -= len(m.Ip) - copy(dAtA[i:], m.Ip) - i = encodeVarintRequest(dAtA, i, uint64(len(m.Ip))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TCPAddr) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TCPAddr) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TCPAddr) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Zone) > 0 { - i -= len(m.Zone) - copy(dAtA[i:], m.Zone) - i = encodeVarintRequest(dAtA, i, uint64(len(m.Zone))) - i-- - dAtA[i] = 0x1a - } - if m.Port != 0 { - i = encodeVarintRequest(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x10 - } - if len(m.Ip) > 0 { - i -= len(m.Ip) - copy(dAtA[i:], m.Ip) - i = encodeVarintRequest(dAtA, i, uint64(len(m.Ip))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UDPAddr) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UDPAddr) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UDPAddr) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Zone) > 0 { - i -= len(m.Zone) - copy(dAtA[i:], m.Zone) - i = encodeVarintRequest(dAtA, i, uint64(len(m.Zone))) - i-- - dAtA[i] = 0x1a - } - if m.Port != 0 { - i = encodeVarintRequest(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x10 - } - if len(m.Ip) > 0 { - i -= len(m.Ip) - 
copy(dAtA[i:], m.Ip) - i = encodeVarintRequest(dAtA, i, uint64(len(m.Ip))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UnixAddr) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UnixAddr) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnixAddr) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Net) > 0 { - i -= len(m.Net) - copy(dAtA[i:], m.Net) - i = encodeVarintRequest(dAtA, i, uint64(len(m.Net))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRequest(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RequestContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestContext) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ClientAddress != nil { - { - size := m.ClientAddress.Size() - i -= size - if _, err := m.ClientAddress.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if len(m.ClientMetadata) > 0 { - for iNdEx := len(m.ClientMetadata) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ClientMetadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.SpanContext != nil { - { - size, err := m.SpanContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RequestContext_Ip) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestContext_Ip) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Ip != nil { - { - size, err := m.Ip.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *RequestContext_Tcp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestContext_Tcp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Tcp != nil { - { - size, err := m.Tcp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *RequestContext_Udp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestContext_Udp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Udp != nil { - { - size, err := m.Udp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m 
*RequestContext_Unix) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestContext_Unix) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Unix != nil { - { - size, err := m.Unix.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *TracesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TracesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TracesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TracesData != nil { - { - size, err := m.TracesData.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.RequestContext != nil { - { - size, err := m.RequestContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.FormatVersion != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.FormatVersion)) - i-- - dAtA[i] = 0xd - } - return len(dAtA) - i, nil -} - -func (m *MetricsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MetricsData != nil { - { - size, err := m.MetricsData.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.RequestContext != nil { - { - size, err := m.RequestContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.FormatVersion != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.FormatVersion)) - i-- - dAtA[i] = 0xd - } - return len(dAtA) - i, nil -} - -func (m *LogsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LogsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LogsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.LogsData != nil { - { - size, err := m.LogsData.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.RequestContext != nil { - { - size, err := m.RequestContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- 
- dAtA[i] = 0x12 - } - if m.FormatVersion != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.FormatVersion)) - i-- - dAtA[i] = 0xd - } - return len(dAtA) - i, nil -} - -func (m *ProfilesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProfilesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProfilesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ProfilesData != nil { - { - size, err := m.ProfilesData.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.RequestContext != nil { - { - size, err := m.RequestContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRequest(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.FormatVersion != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.FormatVersion)) - i-- - dAtA[i] = 0xd - } - return len(dAtA) - i, nil -} - -func encodeVarintRequest(dAtA []byte, offset int, v uint64) int { - offset -= sovRequest(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *SpanContext) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.TraceId) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - l = len(m.SpanId) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - if m.TraceFlags != 0 { - n += 5 - } - l = len(m.TraceState) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - if m.Remote { - n += 2 - } - return n -} - -func (m *IPAddr) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ip) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - l = len(m.Zone) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - return n -} - -func (m *TCPAddr) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ip) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - if m.Port != 0 { - n += 1 + sovRequest(uint64(m.Port)) - } - l = len(m.Zone) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - return n -} - -func (m *UDPAddr) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ip) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - if m.Port != 0 { - n += 1 + sovRequest(uint64(m.Port)) - } - l = len(m.Zone) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - return n -} - -func (m *UnixAddr) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - l = len(m.Net) - if l > 0 { - n += 1 + l + sovRequest(uint64(l)) - } - return n -} - -func (m *RequestContext) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SpanContext != nil { - l = m.SpanContext.Size() - n += 1 + l + sovRequest(uint64(l)) - } - if len(m.ClientMetadata) > 0 { - for _, e := range m.ClientMetadata { - l = e.Size() - n += 1 + l + sovRequest(uint64(l)) - } - } - if m.ClientAddress != nil { - n += m.ClientAddress.Size() - } - return n -} - -func (m *RequestContext_Ip) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Ip != nil { 
- l = m.Ip.Size() - n += 1 + l + sovRequest(uint64(l)) - } - return n -} -func (m *RequestContext_Tcp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Tcp != nil { - l = m.Tcp.Size() - n += 1 + l + sovRequest(uint64(l)) - } - return n -} -func (m *RequestContext_Udp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Udp != nil { - l = m.Udp.Size() - n += 1 + l + sovRequest(uint64(l)) - } - return n -} -func (m *RequestContext_Unix) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Unix != nil { - l = m.Unix.Size() - n += 1 + l + sovRequest(uint64(l)) - } - return n -} -func (m *TracesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FormatVersion != 0 { - n += 5 - } - if m.RequestContext != nil { - l = m.RequestContext.Size() - n += 1 + l + sovRequest(uint64(l)) - } - if m.TracesData != nil { - l = m.TracesData.Size() - n += 1 + l + sovRequest(uint64(l)) - } - return n -} - -func (m *MetricsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FormatVersion != 0 { - n += 5 - } - if m.RequestContext != nil { - l = m.RequestContext.Size() - n += 1 + l + sovRequest(uint64(l)) - } - if m.MetricsData != nil { - l = m.MetricsData.Size() - n += 1 + l + sovRequest(uint64(l)) - } - return n -} - -func (m *LogsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FormatVersion != 0 { - n += 5 - } - if m.RequestContext != nil { - l = m.RequestContext.Size() - n += 1 + l + sovRequest(uint64(l)) - } - if m.LogsData != nil { - l = m.LogsData.Size() - n += 1 + l + sovRequest(uint64(l)) - } - return n -} - -func (m *ProfilesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FormatVersion != 0 { - n += 5 - } - if m.RequestContext != nil { - l = m.RequestContext.Size() - n += 1 + l + sovRequest(uint64(l)) - } - if m.ProfilesData != nil { - l = m.ProfilesData.Size() - n += 1 + l + sovRequest(uint64(l)) - } - return n -} - -func sovRequest(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRequest(x uint64) (n int) { - return sovRequest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *SpanContext) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRequest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SpanContext: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SpanContext: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRequest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRequest - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRequest - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TraceId = append(m.TraceId[:0], dAtA[iNdEx:postIndex]...) 
-			if m.TraceId == nil {
-				m.TraceId = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.SpanId = append(m.SpanId[:0], dAtA[iNdEx:postIndex]...)
-			if m.SpanId == nil {
-				m.SpanId = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 5 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TraceFlags", wireType)
-			}
-			m.TraceFlags = 0
-			if (iNdEx + 4) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.TraceFlags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
-			iNdEx += 4
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.TraceState = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Remote = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *IPAddr) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: IPAddr: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: IPAddr: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Ip = append(m.Ip[:0], dAtA[iNdEx:postIndex]...)
-			if m.Ip == nil {
-				m.Ip = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Zone = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *TCPAddr) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: TCPAddr: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: TCPAddr: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Ip = append(m.Ip[:0], dAtA[iNdEx:postIndex]...)
-			if m.Ip == nil {
-				m.Ip = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
-			}
-			m.Port = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Port |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Zone = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *UDPAddr) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: UDPAddr: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: UDPAddr: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Ip = append(m.Ip[:0], dAtA[iNdEx:postIndex]...)
-			if m.Ip == nil {
-				m.Ip = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
-			}
-			m.Port = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Port |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Zone = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *UnixAddr) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: UnixAddr: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: UnixAddr: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Net", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Net = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *RequestContext) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RequestContext: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RequestContext: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SpanContext", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.SpanContext == nil {
-				m.SpanContext = &SpanContext{}
-			}
-			if err := m.SpanContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ClientMetadata", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.ClientMetadata = append(m.ClientMetadata, v1.KeyValue{})
-			if err := m.ClientMetadata[len(m.ClientMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &IPAddr{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.ClientAddress = &RequestContext_Ip{v}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Tcp", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &TCPAddr{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.ClientAddress = &RequestContext_Tcp{v}
-			iNdEx = postIndex
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Udp", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &UDPAddr{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.ClientAddress = &RequestContext_Udp{v}
-			iNdEx = postIndex
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Unix", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &UnixAddr{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.ClientAddress = &RequestContext_Unix{v}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *TracesRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: TracesRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: TracesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 5 {
-				return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
-			}
-			m.FormatVersion = 0
-			if (iNdEx + 4) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.FormatVersion = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
-			iNdEx += 4
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.RequestContext == nil {
-				m.RequestContext = &RequestContext{}
-			}
-			if err := m.RequestContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TracesData", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.TracesData == nil {
-				m.TracesData = &v11.TracesData{}
-			}
-			if err := m.TracesData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MetricsRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MetricsRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MetricsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 5 {
-				return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
-			}
-			m.FormatVersion = 0
-			if (iNdEx + 4) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.FormatVersion = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
-			iNdEx += 4
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.RequestContext == nil {
-				m.RequestContext = &RequestContext{}
-			}
-			if err := m.RequestContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MetricsData", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.MetricsData == nil {
-				m.MetricsData = &v12.MetricsData{}
-			}
-			if err := m.MetricsData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LogsRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LogsRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LogsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 5 {
-				return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
-			}
-			m.FormatVersion = 0
-			if (iNdEx + 4) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.FormatVersion = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
-			iNdEx += 4
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.RequestContext == nil {
-				m.RequestContext = &RequestContext{}
-			}
-			if err := m.RequestContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LogsData", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.LogsData == nil {
-				m.LogsData = &v13.LogsData{}
-			}
-			if err := m.LogsData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ProfilesRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ProfilesRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ProfilesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 5 {
-				return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
-			}
-			m.FormatVersion = 0
-			if (iNdEx + 4) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.FormatVersion = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
-			iNdEx += 4
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.RequestContext == nil {
-				m.RequestContext = &RequestContext{}
-			}
-			if err := m.RequestContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ProfilesData", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRequest
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.ProfilesData == nil {
-				m.ProfilesData = &v1development.ProfilesData{}
-			}
-			if err := m.ProfilesData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRequest(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthRequest
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipRequest(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	depth := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowRequest
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-		case 1:
-			iNdEx += 8
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRequest
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthRequest
-			}
-			iNdEx += length
-		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupRequest
-			}
-			depth--
-		case 5:
-			iNdEx += 4
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthRequest
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
-	}
-	return 0, io.ErrUnexpectedEOF
-}
-
-var (
-	ErrInvalidLengthRequest        = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowRequest          = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupRequest = fmt.Errorf("proto: unexpected end of group")
-)
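Everything deleted above is gogo-generated marshaling plumbing; the one piece with intrinsic logic is the `sovRequest` helper, which derives a protobuf varint's encoded width from the value's bit length. For reference only, a minimal standalone sketch of that formula (the name `varintLen` is ours, not the collector's):

```go
package main

import (
	"fmt"
	"math/bits"
)

// varintLen mirrors the deleted sovRequest helper: a protobuf varint carries
// 7 payload bits per byte, so the width is ceil(bitlen/7). The x|1 keeps the
// bit length at least 1, so zero still encodes in one byte.
func varintLen(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, v := range []uint64{0, 127, 128, 16384} {
		fmt.Printf("%d encodes in %d byte(s)\n", v, varintLen(v)) // 1, 1, 2, 3
	}
}
```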
diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/internal/request.proto b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/internal/request.proto
deleted file mode 100644
index ba470516717..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/internal/request.proto
+++ /dev/null
@@ -1,92 +0,0 @@
-syntax = "proto3";
-
-package opentelemetry.collector.pdata.xpdata.internal;
-
-option go_package = "go.opentelemetry.io/collector/pdata/xpdata/internal";
-
-import "gogoproto/gogo.proto";
-
-import "opentelemetry/proto/trace/v1/trace.proto";
-import "opentelemetry/proto/metrics/v1/metrics.proto";
-import "opentelemetry/proto/logs/v1/logs.proto";
-import "opentelemetry/proto/common/v1/common.proto";
-import "opentelemetry/proto/profiles/v1development/profiles.proto";
-
-// SpanContext represents a span context encoded associated with a telemetry export request.
-message SpanContext {
-  bytes trace_id = 1;
-  bytes span_id = 2;
-  fixed32 trace_flags = 3;
-  string trace_state = 4;
-  bool remote = 5;
-}
-
-message IPAddr {
-  bytes ip = 1;
-  string zone = 2;
-}
-
-message TCPAddr {
-  bytes ip = 1;
-  int64 port = 2;
-  string zone = 3;
-}
-
-message UDPAddr {
-  bytes ip = 1;
-  int64 port = 2;
-  string zone = 3;
-}
-
-message UnixAddr {
-  string name = 1;
-  string net = 2;
-}
-
-// RequestContext represents metadata associated with a telemetry export request.
-message RequestContext {
-  SpanContext span_context = 1;
-
-  // ClientMetadata contains additional metadata about the client making the request.
-  repeated opentelemetry.proto.common.v1.KeyValue client_metadata = 2 [ (gogoproto.nullable) = false ];
-
-  // ClientAddress contains the address of the client making the request.
-  oneof client_address {
-    IPAddr ip = 3;
-    TCPAddr tcp = 4;
-    UDPAddr udp = 5;
-    UnixAddr unix = 6;
-  }
-}
-
-// The following messages are wrappers around standard OpenTelemetry data types.
-// They embed request-level context and a version discriminator to ensure they are not wire-compatible with
-// the canonical OpenTelemetry proto messages.
-//
-// Each wrapper reserves field tag 1 for a `fixed32` (protobuf wire type 5) format_version field, which makes it
-// structurally incompatible with the standard OTLP messages which use tag 1 for the data message field (protobuf wire type 2).
-// This ensures old and new formats cannot be confused during decoding.
-
-message TracesRequest {
-  fixed32 format_version = 1;
-  RequestContext request_context = 2;
-  opentelemetry.proto.trace.v1.TracesData traces_data = 3;
-}
-
-message MetricsRequest {
-  fixed32 format_version = 1;
-  RequestContext request_context = 2;
-  opentelemetry.proto.metrics.v1.MetricsData metrics_data = 3;
-}
-
-message LogsRequest {
-  fixed32 format_version = 1;
-  RequestContext request_context = 2;
-  opentelemetry.proto.logs.v1.LogsData logs_data = 3;
-}
-
-message ProfilesRequest {
-  fixed32 format_version = 1;
-  RequestContext request_context = 2;
-  opentelemetry.proto.profiles.v1development.ProfilesData profiles_data = 3;
-}
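The comment block in the deleted proto file explains the discriminator these wrappers rely on: field 1 as `fixed32` (wire type 5) produces the tag byte `(1<<3)|5 = 0x0d`, while canonical OTLP data messages put a length-delimited message at field 1, whose tag byte is `(1<<3)|2 = 0x0a`. A hedged sketch of such a check, in the spirit of the package's `isRequestPayloadV1` (our naming and placement, not the collector's actual helper):

```go
// isVersionedRequest reports whether buf looks like a wrapped request payload
// rather than a canonical OTLP message: a wrapped payload always starts with
// the fixed32 field-1 tag byte 0x0d, a plain OTLP message with 0x0a.
func isVersionedRequest(buf []byte) bool {
	return len(buf) > 0 && buf[0] == 0x0d
}
```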
diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/logs_request.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/logs_request.go
index a5483ce4872..5d4a20ff230 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/logs_request.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/logs_request.go
@@ -9,19 +9,18 @@ import (
 
 	"go.opentelemetry.io/collector/pdata/internal"
 	"go.opentelemetry.io/collector/pdata/plog"
-	reqint "go.opentelemetry.io/collector/pdata/xpdata/request/internal"
 )
 
 // MarshalLogs marshals plog.Logs along with the context into a byte slice.
 func MarshalLogs(ctx context.Context, ld plog.Logs) ([]byte, error) {
-	otlpLogs := internal.LogsToProto(internal.Logs(ld))
-	rc := encodeContext(ctx)
-	lr := reqint.LogsRequest{
+	lr := internal.LogsRequest{
 		FormatVersion:  requestFormatVersion,
-		LogsData:       &otlpLogs,
-		RequestContext: &rc,
+		LogsData:       internal.LogsToProto(internal.LogsWrapper(ld)),
+		RequestContext: encodeContext(ctx),
 	}
-	return lr.Marshal()
+	buf := make([]byte, lr.SizeProto())
+	lr.MarshalProto(buf)
+	return buf, nil
 }
 
 // UnmarshalLogs unmarshals a byte slice into plog.Logs and the context.
@@ -30,9 +29,9 @@ func UnmarshalLogs(buf []byte) (context.Context, plog.Logs, error) {
 	if !isRequestPayloadV1(buf) {
 		return ctx, plog.Logs{}, ErrInvalidFormat
 	}
-	lr := reqint.LogsRequest{}
-	if err := lr.Unmarshal(buf); err != nil {
+	lr := internal.LogsRequest{}
+	if err := lr.UnmarshalProto(buf); err != nil {
 		return ctx, plog.Logs{}, fmt.Errorf("failed to unmarshal logs request: %w", err)
 	}
-	return decodeContext(ctx, lr.RequestContext), plog.Logs(internal.LogsFromProto(*lr.LogsData)), nil
+	return decodeContext(ctx, lr.RequestContext), plog.Logs(internal.LogsFromProto(lr.LogsData)), nil
 }
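Under the new API the caller sizes the buffer up front with `SizeProto` and fills it with `MarshalProto`, instead of delegating to the gogo-generated `Marshal`. A minimal round-trip sketch of the exported helpers, assuming the import paths shown in this diff; the same pattern repeats for metrics, profiles, and traces in the files that follow:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
	request "go.opentelemetry.io/collector/pdata/xpdata/request"
)

func main() {
	logs := plog.NewLogs()
	logs.ResourceLogs().AppendEmpty()

	// MarshalLogs wraps the payload with the format version and request context.
	buf, err := request.MarshalLogs(context.Background(), logs)
	if err != nil {
		panic(err)
	}

	// UnmarshalLogs restores both the payload and the request-scoped context.
	_, restored, err := request.UnmarshalLogs(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(restored.ResourceLogs().Len()) // 1
}
```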
diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/metrics_request.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/metrics_request.go
index b4796da5243..38e59bf6893 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/metrics_request.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/metrics_request.go
@@ -9,19 +9,18 @@ import (
 
 	"go.opentelemetry.io/collector/pdata/internal"
 	"go.opentelemetry.io/collector/pdata/pmetric"
-	reqint "go.opentelemetry.io/collector/pdata/xpdata/request/internal"
 )
 
 // MarshalMetrics marshals pmetric.Metrics along with the context into a byte slice.
 func MarshalMetrics(ctx context.Context, ld pmetric.Metrics) ([]byte, error) {
-	otlpMetrics := internal.MetricsToProto(internal.Metrics(ld))
-	rc := encodeContext(ctx)
-	mr := reqint.MetricsRequest{
+	mr := internal.MetricsRequest{
 		FormatVersion:  requestFormatVersion,
-		MetricsData:    &otlpMetrics,
-		RequestContext: &rc,
+		MetricsData:    internal.MetricsToProto(internal.MetricsWrapper(ld)),
+		RequestContext: encodeContext(ctx),
 	}
-	return mr.Marshal()
+	buf := make([]byte, mr.SizeProto())
+	mr.MarshalProto(buf)
+	return buf, nil
 }
 
 // UnmarshalMetrics unmarshals a byte slice into pmetric.Metrics and the context.
@@ -30,9 +29,9 @@ func UnmarshalMetrics(buf []byte) (context.Context, pmetric.Metrics, error) {
 	if !isRequestPayloadV1(buf) {
 		return ctx, pmetric.Metrics{}, ErrInvalidFormat
 	}
-	mr := reqint.MetricsRequest{}
-	if err := mr.Unmarshal(buf); err != nil {
+	mr := internal.MetricsRequest{}
+	if err := mr.UnmarshalProto(buf); err != nil {
 		return ctx, pmetric.Metrics{}, fmt.Errorf("failed to unmarshal metrics request: %w", err)
 	}
-	return decodeContext(ctx, mr.RequestContext), pmetric.Metrics(internal.MetricsFromProto(*mr.MetricsData)), nil
+	return decodeContext(ctx, mr.RequestContext), pmetric.Metrics(internal.MetricsFromProto(mr.MetricsData)), nil
 }
diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/profiles_request.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/profiles_request.go
index b13c065fe92..de099ea81cf 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/profiles_request.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/profiles_request.go
@@ -9,19 +9,18 @@ import (
 
 	"go.opentelemetry.io/collector/pdata/internal"
 	"go.opentelemetry.io/collector/pdata/pprofile"
-	reqint "go.opentelemetry.io/collector/pdata/xpdata/request/internal"
 )
 
 // MarshalProfiles marshals pprofile.Profiles along with the context into a byte slice.
 func MarshalProfiles(ctx context.Context, ld pprofile.Profiles) ([]byte, error) {
-	otlpProfiles := internal.ProfilesToProto(internal.Profiles(ld))
-	rc := encodeContext(ctx)
-	pr := reqint.ProfilesRequest{
+	pr := internal.ProfilesRequest{
 		FormatVersion:  requestFormatVersion,
-		ProfilesData:   &otlpProfiles,
-		RequestContext: &rc,
+		ProfilesData:   internal.ProfilesToProto(internal.ProfilesWrapper(ld)),
+		RequestContext: encodeContext(ctx),
 	}
-	return pr.Marshal()
+	buf := make([]byte, pr.SizeProto())
+	pr.MarshalProto(buf)
+	return buf, nil
 }
 
 // UnmarshalProfiles unmarshals a byte slice into pprofile.Profiles and the context.
@@ -30,9 +29,9 @@ func UnmarshalProfiles(buf []byte) (context.Context, pprofile.Profiles, error) {
 	if !isRequestPayloadV1(buf) {
 		return ctx, pprofile.Profiles{}, ErrInvalidFormat
 	}
-	pr := reqint.ProfilesRequest{}
-	if err := pr.Unmarshal(buf); err != nil {
+	pr := internal.ProfilesRequest{}
+	if err := pr.UnmarshalProto(buf); err != nil {
 		return ctx, pprofile.Profiles{}, fmt.Errorf("failed to unmarshal profiles request: %w", err)
 	}
-	return decodeContext(ctx, pr.RequestContext), pprofile.Profiles(internal.ProfilesFromProto(*pr.ProfilesData)), nil
+	return decodeContext(ctx, pr.RequestContext), pprofile.Profiles(internal.ProfilesFromProto(pr.ProfilesData)), nil
 }
diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/traces_request.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/traces_request.go
index ff5312b1231..c427ccfc781 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/traces_request.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/request/traces_request.go
@@ -9,19 +9,18 @@ import (
 
 	"go.opentelemetry.io/collector/pdata/internal"
 	"go.opentelemetry.io/collector/pdata/ptrace"
-	reqint "go.opentelemetry.io/collector/pdata/xpdata/request/internal"
 )
 
 // MarshalTraces marshals ptrace.Traces along with the context into a byte slice.
 func MarshalTraces(ctx context.Context, ld ptrace.Traces) ([]byte, error) {
-	otlpTraces := internal.TracesToProto(internal.Traces(ld))
-	rc := encodeContext(ctx)
-	tr := reqint.TracesRequest{
+	tr := internal.TracesRequest{
 		FormatVersion:  requestFormatVersion,
-		TracesData:     &otlpTraces,
-		RequestContext: &rc,
+		TracesData:     internal.TracesToProto(internal.TracesWrapper(ld)),
+		RequestContext: encodeContext(ctx),
 	}
-	return tr.Marshal()
+	buf := make([]byte, tr.SizeProto())
+	tr.MarshalProto(buf)
+	return buf, nil
 }
 
 // UnmarshalTraces unmarshals a byte slice into ptrace.Traces and the context.
@@ -30,9 +29,9 @@ func UnmarshalTraces(buf []byte) (context.Context, ptrace.Traces, error) {
 	if !isRequestPayloadV1(buf) {
 		return ctx, ptrace.Traces{}, ErrInvalidFormat
 	}
-	tr := reqint.TracesRequest{}
-	if err := tr.Unmarshal(buf); err != nil {
+	tr := internal.TracesRequest{}
+	if err := tr.UnmarshalProto(buf); err != nil {
 		return ctx, ptrace.Traces{}, fmt.Errorf("failed to unmarshal traces request: %w", err)
 	}
-	return decodeContext(ctx, tr.RequestContext), ptrace.Traces(internal.TracesFromProto(*tr.TracesData)), nil
+	return decodeContext(ctx, tr.RequestContext), ptrace.Traces(internal.TracesFromProto(tr.TracesData)), nil
 }
diff --git a/vendor/go.opentelemetry.io/collector/processor/processor.go b/vendor/go.opentelemetry.io/collector/processor/processor.go
index 93ed0b69f65..8b1203e9f53 100644
--- a/vendor/go.opentelemetry.io/collector/processor/processor.go
+++ b/vendor/go.opentelemetry.io/collector/processor/processor.go
@@ -8,8 +8,8 @@ import (
 
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/internal/componentalias"
 	"go.opentelemetry.io/collector/pipeline"
-	"go.opentelemetry.io/collector/processor/internal"
 )
 
 // Traces is a processor that can consume traces.
@@ -99,6 +99,7 @@ func (f factoryOptionFunc) applyOption(o *factory) {
 type factory struct {
 	cfgType component.Type
 	component.CreateDefaultConfigFunc
+	componentalias.TypeAliasHolder
 	createTracesFunc     CreateTracesFunc
 	tracesStabilityLevel component.StabilityLevel
 	createMetricsFunc    CreateMetricsFunc
@@ -130,8 +131,8 @@ func (f *factory) CreateTraces(ctx context.Context, set Settings, cfg component.
 		return nil, pipeline.ErrSignalNotSupported
 	}
 
-	if set.ID.Type() != f.Type() {
-		return nil, internal.ErrIDMismatch(set.ID, f.Type())
+	if err := componentalias.ValidateComponentType(f, set.ID); err != nil {
+		return nil, err
 	}
 
 	return f.createTracesFunc(ctx, set, cfg, next)
@@ -142,8 +143,8 @@ func (f *factory) CreateMetrics(ctx context.Context, set Settings, cfg component
 		return nil, pipeline.ErrSignalNotSupported
 	}
 
-	if set.ID.Type() != f.Type() {
-		return nil, internal.ErrIDMismatch(set.ID, f.Type())
+	if err := componentalias.ValidateComponentType(f, set.ID); err != nil {
+		return nil, err
 	}
 
 	return f.createMetricsFunc(ctx, set, cfg, next)
@@ -154,8 +155,8 @@ func (f *factory) CreateLogs(ctx context.Context, set Settings, cfg component.Co
 		return nil, pipeline.ErrSignalNotSupported
 	}
 
-	if set.ID.Type() != f.Type() {
-		return nil, internal.ErrIDMismatch(set.ID, f.Type())
+	if err := componentalias.ValidateComponentType(f, set.ID); err != nil {
+		return nil, err
 	}
 
 	return f.createLogsFunc(ctx, set, cfg, next)
@@ -199,6 +200,7 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa
 	f := &factory{
 		cfgType:                 cfgType,
 		CreateDefaultConfigFunc: createDefaultConfig,
+		TypeAliasHolder:         componentalias.NewTypeAliasHolder(),
 	}
 	for _, opt := range options {
 		opt.applyOption(f)
diff --git a/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go b/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go
index b91c44b9889..b2a6e86cd06 100644
--- a/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go
+++ b/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go
@@ -8,9 +8,9 @@ import (
 
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/internal/componentalias"
 	"go.opentelemetry.io/collector/pipeline"
 	"go.opentelemetry.io/collector/processor"
-	"go.opentelemetry.io/collector/processor/internal"
 )
 
 // Factory is a component.Factory interface for processors.
@@ -42,76 +42,82 @@ type CreateProfilesFunc func(context.Context, processor.Settings, component.Conf
 
 // FactoryOption apply changes to ReceiverOptions.
 type FactoryOption interface {
 	// applyOption applies the option.
-	applyOption(o *factoryOpts)
+	applyOption(o *factory)
 }
 
 // factoryOptionFunc is an ReceiverFactoryOption created through a function.
-type factoryOptionFunc func(*factoryOpts)
+type factoryOptionFunc func(*factory)
 
-func (f factoryOptionFunc) applyOption(o *factoryOpts) {
+func (f factoryOptionFunc) applyOption(o *factory) {
 	f(o)
 }
 
 type factory struct {
 	processor.Factory
+	componentalias.TypeAliasHolder
+	opts                   []processor.FactoryOption
 	createProfilesFunc     CreateProfilesFunc
 	profilesStabilityLevel component.StabilityLevel
 }
 
-func (f factory) ProfilesStability() component.StabilityLevel {
+func (f *factory) ProfilesStability() component.StabilityLevel {
 	return f.profilesStabilityLevel
 }
 
-func (f factory) CreateProfiles(ctx context.Context, set processor.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) {
+func (f *factory) CreateProfiles(ctx context.Context, set processor.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) {
 	if f.createProfilesFunc == nil {
 		return nil, pipeline.ErrSignalNotSupported
 	}
 
-	if set.ID.Type() != f.Type() {
-		return nil, internal.ErrIDMismatch(set.ID, f.Type())
+	if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil {
+		return nil, err
 	}
 
	return f.createProfilesFunc(ctx, set, cfg, next)
 }
 
-type factoryOpts struct {
-	opts []processor.FactoryOption
-	*factory
-}
-
 // WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level.
 func WithTraces(createTraces processor.CreateTracesFunc, sl component.StabilityLevel) FactoryOption {
-	return factoryOptionFunc(func(o *factoryOpts) {
+	return factoryOptionFunc(func(o *factory) {
 		o.opts = append(o.opts, processor.WithTraces(createTraces, sl))
 	})
 }
 
 // WithMetrics overrides the default "error not supported" implementation for CreateMetrics and the default "undefined" stability level.
 func WithMetrics(createMetrics processor.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption {
-	return factoryOptionFunc(func(o *factoryOpts) {
+	return factoryOptionFunc(func(o *factory) {
 		o.opts = append(o.opts, processor.WithMetrics(createMetrics, sl))
 	})
 }
 
 // WithLogs overrides the default "error not supported" implementation for CreateLogs and the default "undefined" stability level.
 func WithLogs(createLogs processor.CreateLogsFunc, sl component.StabilityLevel) FactoryOption {
-	return factoryOptionFunc(func(o *factoryOpts) {
+	return factoryOptionFunc(func(o *factory) {
 		o.opts = append(o.opts, processor.WithLogs(createLogs, sl))
 	})
 }
 
 // WithProfiles overrides the default "error not supported" implementation for CreateProfiles and the default "undefined" stability level.
 func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption {
-	return factoryOptionFunc(func(o *factoryOpts) {
+	return factoryOptionFunc(func(o *factory) {
 		o.profilesStabilityLevel = sl
 		o.createProfilesFunc = createProfiles
 	})
 }
 
-// NewFactory returns a Factory.
+// WithDeprecatedTypeAlias configures a deprecated type alias for the processor. Only one alias is supported per processor.
+// When the alias is used in configuration, a deprecation warning is automatically logged.
+func WithDeprecatedTypeAlias(alias component.Type) FactoryOption {
+	return factoryOptionFunc(func(o *factory) {
+		o.SetDeprecatedAlias(alias)
+	})
+}
+
+// NewFactory creates a wrapped processor.Factory with experimental capabilities.
 func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory {
-	opts := factoryOpts{factory: &factory{}}
+	f := &factory{TypeAliasHolder: componentalias.NewTypeAliasHolder()}
 	for _, opt := range options {
-		opt.applyOption(&opts)
+		opt.applyOption(f)
 	}
-	opts.Factory = processor.NewFactory(cfgType, createDefaultConfig, opts.opts...)
-	return opts.factory
+	f.Factory = processor.NewFactory(cfgType, createDefaultConfig, f.opts...)
+	f.Factory.(componentalias.TypeAliasHolder).SetDeprecatedAlias(f.DeprecatedAlias())
+	return f
 }
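The new `WithDeprecatedTypeAlias` option lets a factory keep accepting component IDs under an old type name while a deprecation warning is logged. A hedged sketch of wiring it up; the type names and the no-op create functions here are invented for illustration:

```go
package main

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/processor"
	"go.opentelemetry.io/collector/processor/xprocessor"
)

type nopConfig struct{}

func main() {
	factory := xprocessor.NewFactory(
		component.MustNewType("myprocessor"), // hypothetical current type name
		func() component.Config { return &nopConfig{} },
		xprocessor.WithTraces(
			func(context.Context, processor.Settings, component.Config, consumer.Traces) (processor.Traces, error) {
				return nil, nil // placeholder; a real factory returns a processor here
			},
			component.StabilityLevelAlpha,
		),
		// The old spelling is still accepted in configs, with a deprecation warning.
		xprocessor.WithDeprecatedTypeAlias(component.MustNewType("my_processor")),
	)
	_ = factory
}
```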
diff --git a/vendor/go.opentelemetry.io/collector/receiver/internal/err.go b/vendor/go.opentelemetry.io/collector/receiver/internal/err.go
deleted file mode 100644
index 7969e79afcc..00000000000
--- a/vendor/go.opentelemetry.io/collector/receiver/internal/err.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/collector/receiver/internal"
-
-import (
-	"fmt"
-
-	"go.opentelemetry.io/collector/component"
-)
-
-func ErrIDMismatch(id component.ID, typ component.Type) error {
-	return fmt.Errorf("component type mismatch: component ID %q does not have type %q", id, typ)
-}
diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md
index c313937e29e..9324dbe0526 100644
--- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md
+++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md
@@ -1,6 +1,5 @@
-# OTLP Receiver
-
+# OTLP Receiver
 | Status | |
 | ------------- |-----------|
 | Stability | [development]: profiles |
diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go
index 1b6f483a1e3..99c2bb5420e 100644
--- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go
+++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go
@@ -46,7 +46,7 @@ func createDefaultConfig() component.Config {
 	grpcCfg.ReadBufferSize = 512 * 1024
 
 	httpCfg := confighttp.NewDefaultServerConfig()
-	httpCfg.Endpoint = "localhost:4318"
+	httpCfg.NetAddr.Endpoint = "localhost:4318"
 	// For backward compatibility:
 	httpCfg.TLS = configoptional.None[configtls.ServerConfig]()
 	httpCfg.WriteTimeout = 0
diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go
index 269769b6824..d111618e6ad 100644
--- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go
+++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go
@@ -9,18 +9,23 @@ import (
 
 	"go.opentelemetry.io/collector/consumer/xconsumer"
 	"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
 	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
+	"go.opentelemetry.io/collector/receiver/receiverhelper"
 )
 
+const dataFormatProtobuf = "protobuf"
+
 // Receiver is the type used to handle spans from OpenTelemetry exporters.
 type Receiver struct {
 	pprofileotlp.UnimplementedGRPCServer
 	nextConsumer xconsumer.Profiles
+	obsreport    *receiverhelper.ObsReport
 }
 
 // New creates a new Receiver reference.
-func New(nextConsumer xconsumer.Profiles) *Receiver {
+func New(nextConsumer xconsumer.Profiles, obsreport *receiverhelper.ObsReport) *Receiver {
 	return &Receiver{
 		nextConsumer: nextConsumer,
+		obsreport:    obsreport,
 	}
 }
 
@@ -28,12 +33,15 @@
 func (r *Receiver) Export(ctx context.Context, req pprofileotlp.ExportRequest) (pprofileotlp.ExportResponse, error) {
 	td := req.Profiles()
 	// We need to ensure that it propagates the receiver name as a tag
-	numProfiles := td.SampleCount()
-	if numProfiles == 0 {
+	numSamples := td.SampleCount()
+	if numSamples == 0 {
 		return pprofileotlp.NewExportResponse(), nil
 	}
 
+	ctx = r.obsreport.StartTracesOp(ctx)
 	err := r.nextConsumer.ConsumeProfiles(ctx, td)
+	r.obsreport.EndTracesOp(ctx, dataFormatProtobuf, numSamples, err)
+
 	// Use appropriate status codes for permanent/non-permanent errors
 	// If we return the error straightaway, then the grpc implementation will set status code to Unknown
 	// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
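With the second constructor argument, the profiles receiver now counts accepted and refused samples like the other signals. A hedged sketch of how the caller might build the report, inside receiver start-up code; `set` (the receiver.Settings), `nextConsumer`, and the surrounding error handling are assumed context, and the `receiverhelper.NewObsReport` field names are from the upstream API as we understand it:

```go
obsrep, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
	ReceiverID:             set.ID, // this receiver's component ID
	Transport:              "grpc", // matches the transport being instrumented
	ReceiverCreateSettings: set,
})
if err != nil {
	return nil, err
}
recv := profiles.New(nextConsumer, obsrep) // the new two-argument constructor
```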
diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml
index adb2cae28f0..bc971d73f7e 100644
--- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml
+++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml
@@ -1,3 +1,4 @@
+display_name: OTLP Receiver
 type: otlp
 
 github_project: open-telemetry/opentelemetry-collector
diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go
index 7f06b89773d..5b35b7576c8 100644
--- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go
+++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go
@@ -19,7 +19,6 @@ import (
 	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/consumer/xconsumer"
 	"go.opentelemetry.io/collector/internal/telemetry"
-	"go.opentelemetry.io/collector/internal/telemetry/componentattribute"
 	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
 	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
 	"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
@@ -54,7 +53,7 @@ type otlpReceiver struct {
 // responsibility to invoke the respective Start*Reception methods as well
 // as the various Stop*Reception methods to end it.
 func newOtlpReceiver(cfg *Config, set *receiver.Settings) (*otlpReceiver, error) {
-	set.TelemetrySettings = telemetry.WithoutAttributes(set.TelemetrySettings, componentattribute.SignalKey)
+	set.TelemetrySettings = telemetry.DropInjectedAttributes(set.TelemetrySettings, telemetry.SignalKey)
 	set.Logger.Debug("created signal-agnostic logger")
 	r := &otlpReceiver{
 		cfg: cfg,
@@ -94,7 +93,7 @@ func (r *otlpReceiver) startGRPCServer(ctx context.Context, host component.Host)
 	grpcCfg := r.cfg.GRPC.Get()
 
 	var err error
-	if r.serverGRPC, err = grpcCfg.ToServer(ctx, host, r.settings.TelemetrySettings); err != nil {
+	if r.serverGRPC, err = grpcCfg.ToServer(ctx, host.GetExtensions(), r.settings.TelemetrySettings); err != nil {
 		return err
 	}
@@ -111,7 +110,7 @@ func (r *otlpReceiver) startGRPCServer(ctx context.Context, host component.Host)
 	}
 
 	if r.nextProfiles != nil {
-		pprofileotlp.RegisterGRPCServer(r.serverGRPC, profiles.New(r.nextProfiles))
+		pprofileotlp.RegisterGRPCServer(r.serverGRPC, profiles.New(r.nextProfiles, r.obsrepGRPC))
 	}
 
 	var gln net.Listener
@@ -120,14 +119,11 @@
 	}
 
 	r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", gln.Addr().String()))
-	r.shutdownWG.Add(1)
-	go func() {
-		defer r.shutdownWG.Done()
-
+	r.shutdownWG.Go(func() {
 		if errGrpc := r.serverGRPC.Serve(gln); errGrpc != nil && !errors.Is(errGrpc, grpc.ErrServerStopped) {
 			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGrpc))
 		}
-	}()
+	})
 	return nil
 }
@@ -161,14 +157,14 @@
 	}
 
 	if r.nextProfiles != nil {
-		httpProfilesReceiver := profiles.New(r.nextProfiles)
+		httpProfilesReceiver := profiles.New(r.nextProfiles, r.obsrepHTTP)
 		httpMux.HandleFunc(defaultProfilesURLPath, func(resp http.ResponseWriter, req *http.Request) {
 			handleProfiles(resp, req, httpProfilesReceiver)
 		})
 	}
 
 	var err error
-	if r.serverHTTP, err = httpCfg.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, httpMux, confighttp.WithErrorHandler(errorHandler)); err != nil {
+	if r.serverHTTP, err = httpCfg.ServerConfig.ToServer(ctx, host.GetExtensions(), r.settings.TelemetrySettings, httpMux, confighttp.WithErrorHandler(errorHandler)); err != nil {
 		return err
 	}
@@ -178,14 +174,11 @@
 	}
 
 	r.settings.Logger.Info("Starting HTTP server", zap.String("endpoint", hln.Addr().String()))
-	r.shutdownWG.Add(1)
-	go func() {
-		defer r.shutdownWG.Done()
-
+	r.shutdownWG.Go(func() {
 		if errHTTP := r.serverHTTP.Serve(hln); errHTTP != nil && !errors.Is(errHTTP, http.ErrServerClosed) {
 			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
 		}
-	}()
+	})
 	return nil
 }
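The `Add(1)`/`defer Done()`/`go` dance around both server goroutines collapses into `sync.WaitGroup.Go`, which was added in Go 1.25 and wraps the bookkeeping around the callback. A standalone sketch showing the two equivalent forms:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Before: manual bookkeeping around the goroutine.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("serve (old style)")
	}()

	// After (Go 1.25+): WaitGroup.Go performs Add/Done itself.
	wg.Go(func() {
		fmt.Println("serve (new style)")
	})

	wg.Wait()
}
```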
@@ -115,6 +115,7 @@ type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.L type factory struct { cfgType component.Type component.CreateDefaultConfigFunc + componentalias.TypeAliasHolder createTracesFunc CreateTracesFunc tracesStabilityLevel component.StabilityLevel createMetricsFunc CreateMetricsFunc @@ -146,8 +147,8 @@ func (f *factory) CreateTraces(ctx context.Context, set Settings, cfg component. return nil, pipeline.ErrSignalNotSupported } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createTracesFunc(ctx, set, cfg, next) @@ -158,8 +159,8 @@ func (f *factory) CreateMetrics(ctx context.Context, set Settings, cfg component return nil, pipeline.ErrSignalNotSupported } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createMetricsFunc(ctx, set, cfg, next) @@ -170,8 +171,8 @@ func (f *factory) CreateLogs(ctx context.Context, set Settings, cfg component.Co return nil, pipeline.ErrSignalNotSupported } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f, set.ID); err != nil { + return nil, err } return f.createLogsFunc(ctx, set, cfg, next) @@ -206,6 +207,7 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa f := &factory{ cfgType: cfgType, CreateDefaultConfigFunc: createDefaultConfig, + TypeAliasHolder: componentalias.NewTypeAliasHolder(), } for _, opt := range options { opt.applyOption(f) diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md index ed266fae300..d04fcb8e8ce 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md @@ -8,83 +8,107 @@ The following telemetry is emitted by this component. ### otelcol_receiver_accepted_log_records -Number of log records successfully pushed into the pipeline. [alpha] +Number of log records successfully pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {records} | Sum | Int | true | alpha | +| {records} | Sum | Int | true | Alpha | ### otelcol_receiver_accepted_metric_points -Number of metric points successfully pushed into the pipeline. [alpha] +Number of metric points successfully pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {datapoints} | Sum | Int | true | alpha | +| {datapoints} | Sum | Int | true | Alpha | + +### otelcol_receiver_accepted_profile_samples + +Number of profile samples successfully pushed into the pipeline. + +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {samples} | Sum | Int | true | Alpha | ### otelcol_receiver_accepted_spans -Number of spans successfully pushed into the pipeline. [alpha] +Number of spans successfully pushed into the pipeline. 
| Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {spans} | Sum | Int | true | alpha | +| {spans} | Sum | Int | true | Alpha | ### otelcol_receiver_failed_log_records -The number of log records that failed to be processed by the receiver due to internal errors. [alpha] +The number of log records that failed to be processed by the receiver due to internal errors. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {records} | Sum | Int | true | alpha | +| {records} | Sum | Int | true | Alpha | ### otelcol_receiver_failed_metric_points -The number of metric points that failed to be processed by the receiver due to internal errors. [alpha] +The number of metric points that failed to be processed by the receiver due to internal errors. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {datapoints} | Sum | Int | true | alpha | +| {datapoints} | Sum | Int | true | Alpha | + +### otelcol_receiver_failed_profile_samples + +The number of profile samples that failed to be processed by the receiver due to internal errors. + +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {samples} | Sum | Int | true | Alpha | ### otelcol_receiver_failed_spans -The number of spans that failed to be processed by the receiver due to internal errors. [alpha] +The number of spans that failed to be processed by the receiver due to internal errors. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {spans} | Sum | Int | true | alpha | +| {spans} | Sum | Int | true | Alpha | ### otelcol_receiver_refused_log_records -Number of log records that could not be pushed into the pipeline. [alpha] +Number of log records that could not be pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {records} | Sum | Int | true | alpha | +| {records} | Sum | Int | true | Alpha | ### otelcol_receiver_refused_metric_points -Number of metric points that could not be pushed into the pipeline. [alpha] +Number of metric points that could not be pushed into the pipeline. + +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {datapoints} | Sum | Int | true | Alpha | + +### otelcol_receiver_refused_profile_samples + +Number of profile samples that could not be pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {datapoints} | Sum | Int | true | alpha | +| {samples} | Sum | Int | true | Alpha | ### otelcol_receiver_refused_spans -Number of spans that could not be pushed into the pipeline. [alpha] +Number of spans that could not be pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {spans} | Sum | Int | true | alpha | +| {spans} | Sum | Int | true | Alpha | ### otelcol_receiver_requests -The number of requests performed. [alpha] +The number of requests performed. 
| Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| {requests} | Sum | Int | true | alpha | +| {requests} | Sum | Int | true | Alpha | #### Attributes diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go index 962bc2e90fe..1feff12bc62 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go @@ -23,19 +23,22 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { - meter metric.Meter - mu sync.Mutex - registrations []metric.Registration - ReceiverAcceptedLogRecords metric.Int64Counter - ReceiverAcceptedMetricPoints metric.Int64Counter - ReceiverAcceptedSpans metric.Int64Counter - ReceiverFailedLogRecords metric.Int64Counter - ReceiverFailedMetricPoints metric.Int64Counter - ReceiverFailedSpans metric.Int64Counter - ReceiverRefusedLogRecords metric.Int64Counter - ReceiverRefusedMetricPoints metric.Int64Counter - ReceiverRefusedSpans metric.Int64Counter - ReceiverRequests metric.Int64Counter + meter metric.Meter + mu sync.Mutex + registrations []metric.Registration + ReceiverAcceptedLogRecords metric.Int64Counter + ReceiverAcceptedMetricPoints metric.Int64Counter + ReceiverAcceptedProfileSamples metric.Int64Counter + ReceiverAcceptedSpans metric.Int64Counter + ReceiverFailedLogRecords metric.Int64Counter + ReceiverFailedMetricPoints metric.Int64Counter + ReceiverFailedProfileSamples metric.Int64Counter + ReceiverFailedSpans metric.Int64Counter + ReceiverRefusedLogRecords metric.Int64Counter + ReceiverRefusedMetricPoints metric.Int64Counter + ReceiverRefusedProfileSamples metric.Int64Counter + ReceiverRefusedSpans metric.Int64Counter + ReceiverRequests metric.Int64Counter } // TelemetryBuilderOption applies changes to default builder. @@ -69,61 +72,79 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...Teleme var err, errs error builder.ReceiverAcceptedLogRecords, err = builder.meter.Int64Counter( "otelcol_receiver_accepted_log_records", - metric.WithDescription("Number of log records successfully pushed into the pipeline. [alpha]"), + metric.WithDescription("Number of log records successfully pushed into the pipeline. [Alpha]"), metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ReceiverAcceptedMetricPoints, err = builder.meter.Int64Counter( "otelcol_receiver_accepted_metric_points", - metric.WithDescription("Number of metric points successfully pushed into the pipeline. [alpha]"), + metric.WithDescription("Number of metric points successfully pushed into the pipeline. [Alpha]"), metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) + builder.ReceiverAcceptedProfileSamples, err = builder.meter.Int64Counter( + "otelcol_receiver_accepted_profile_samples", + metric.WithDescription("Number of profile samples successfully pushed into the pipeline. 
[Alpha]"), + metric.WithUnit("{samples}"), + ) + errs = errors.Join(errs, err) builder.ReceiverAcceptedSpans, err = builder.meter.Int64Counter( "otelcol_receiver_accepted_spans", - metric.WithDescription("Number of spans successfully pushed into the pipeline. [alpha]"), + metric.WithDescription("Number of spans successfully pushed into the pipeline. [Alpha]"), metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ReceiverFailedLogRecords, err = builder.meter.Int64Counter( "otelcol_receiver_failed_log_records", - metric.WithDescription("The number of log records that failed to be processed by the receiver due to internal errors. [alpha]"), + metric.WithDescription("The number of log records that failed to be processed by the receiver due to internal errors. [Alpha]"), metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ReceiverFailedMetricPoints, err = builder.meter.Int64Counter( "otelcol_receiver_failed_metric_points", - metric.WithDescription("The number of metric points that failed to be processed by the receiver due to internal errors. [alpha]"), + metric.WithDescription("The number of metric points that failed to be processed by the receiver due to internal errors. [Alpha]"), metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) + builder.ReceiverFailedProfileSamples, err = builder.meter.Int64Counter( + "otelcol_receiver_failed_profile_samples", + metric.WithDescription("The number of profile samples that failed to be processed by the receiver due to internal errors. [Alpha]"), + metric.WithUnit("{samples}"), + ) + errs = errors.Join(errs, err) builder.ReceiverFailedSpans, err = builder.meter.Int64Counter( "otelcol_receiver_failed_spans", - metric.WithDescription("The number of spans that failed to be processed by the receiver due to internal errors. [alpha]"), + metric.WithDescription("The number of spans that failed to be processed by the receiver due to internal errors. [Alpha]"), metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ReceiverRefusedLogRecords, err = builder.meter.Int64Counter( "otelcol_receiver_refused_log_records", - metric.WithDescription("Number of log records that could not be pushed into the pipeline. [alpha]"), + metric.WithDescription("Number of log records that could not be pushed into the pipeline. [Alpha]"), metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ReceiverRefusedMetricPoints, err = builder.meter.Int64Counter( "otelcol_receiver_refused_metric_points", - metric.WithDescription("Number of metric points that could not be pushed into the pipeline. [alpha]"), + metric.WithDescription("Number of metric points that could not be pushed into the pipeline. [Alpha]"), metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) + builder.ReceiverRefusedProfileSamples, err = builder.meter.Int64Counter( + "otelcol_receiver_refused_profile_samples", + metric.WithDescription("Number of profile samples that could not be pushed into the pipeline. [Alpha]"), + metric.WithUnit("{samples}"), + ) + errs = errors.Join(errs, err) builder.ReceiverRefusedSpans, err = builder.meter.Int64Counter( "otelcol_receiver_refused_spans", - metric.WithDescription("Number of spans that could not be pushed into the pipeline. [alpha]"), + metric.WithDescription("Number of spans that could not be pushed into the pipeline. 
[Alpha]"), metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ReceiverRequests, err = builder.meter.Int64Counter( "otelcol_receiver_requests", - metric.WithDescription("The number of requests performed. [alpha]"), + metric.WithDescription("The number of requests performed. [Alpha]"), metric.WithUnit("{requests}"), ) errs = errors.Join(errs, err) diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/obsmetrics.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/obsmetrics.go index c44aa2df47a..79d335d5cdc 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/obsmetrics.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/obsmetrics.go @@ -38,7 +38,15 @@ const ( // FailedLogRecordsKey used to identify log records failed to be processed by the Collector. FailedLogRecordsKey = "failed_log_records" + // AcceptedProfileSamplesKey used to identify profile samples accepted by the Collector. + AcceptedProfileSamplesKey = "accepted_profile_samples" + // RefusedProfileSamplesKey used to identify profile samples refused (ie.: not ingested) by the Collector. + RefusedProfileSamplesKey = "refused_profile_samples" + // FailedProfileSamplesKey used to identify profile samples failed to be processed by the Collector. + FailedProfileSamplesKey = "failed_profile_samples" + ReceiveTraceDataOperationSuffix = SpanNameSep + "TraceDataReceived" ReceiverMetricsOperationSuffix = SpanNameSep + "MetricsReceived" ReceiverLogsOperationSuffix = SpanNameSep + "LogsReceived" + ReceiverProfilesOperationSuffix = SpanNameSep + "ProfilesReceived" ) diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml index e5854f996b5..ba9c7908619 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml @@ -9,26 +9,31 @@ telemetry: metrics: receiver_accepted_log_records: enabled: true - stability: - level: alpha + stability: alpha description: Number of log records successfully pushed into the pipeline. unit: "{records}" sum: value_type: int monotonic: true receiver_accepted_metric_points: - stability: - level: alpha + stability: alpha enabled: true description: Number of metric points successfully pushed into the pipeline. unit: "{datapoints}" sum: value_type: int monotonic: true + receiver_accepted_profile_samples: + enabled: true + stability: alpha + description: Number of profile samples successfully pushed into the pipeline. + unit: "{samples}" + sum: + value_type: int + monotonic: true receiver_accepted_spans: enabled: true - stability: - level: alpha + stability: alpha description: Number of spans successfully pushed into the pipeline. unit: "{spans}" sum: @@ -36,8 +41,7 @@ telemetry: monotonic: true receiver_failed_log_records: enabled: true - stability: - level: alpha + stability: alpha description: The number of log records that failed to be processed by the receiver due to internal errors. unit: "{records}" sum: @@ -45,17 +49,23 @@ telemetry: monotonic: true receiver_failed_metric_points: enabled: true - stability: - level: alpha + stability: alpha description: The number of metric points that failed to be processed by the receiver due to internal errors. 
unit: "{datapoints}" sum: value_type: int monotonic: true + receiver_failed_profile_samples: + enabled: true + stability: alpha + description: The number of profile samples that failed to be processed by the receiver due to internal errors. + unit: "{samples}" + sum: + value_type: int + monotonic: true receiver_failed_spans: enabled: true - stability: - level: alpha + stability: alpha description: The number of spans that failed to be processed by the receiver due to internal errors. unit: "{spans}" sum: @@ -63,8 +73,7 @@ telemetry: monotonic: true receiver_refused_log_records: enabled: true - stability: - level: alpha + stability: alpha description: Number of log records that could not be pushed into the pipeline. unit: "{records}" sum: @@ -72,17 +81,23 @@ telemetry: monotonic: true receiver_refused_metric_points: enabled: true - stability: - level: alpha + stability: alpha description: Number of metric points that could not be pushed into the pipeline. unit: "{datapoints}" sum: value_type: int monotonic: true + receiver_refused_profile_samples: + enabled: true + stability: alpha + description: Number of profile samples that could not be pushed into the pipeline. + unit: "{samples}" + sum: + value_type: int + monotonic: true receiver_refused_spans: enabled: true - stability: - level: alpha + stability: alpha description: Number of spans that could not be pushed into the pipeline. unit: "{spans}" sum: @@ -90,8 +105,7 @@ telemetry: monotonic: true receiver_requests: enabled: true - stability: - level: alpha + stability: alpha description: The number of requests performed. unit: "{requests}" sum: diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go index eb7fa68043a..ece4d7efac0 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go @@ -16,6 +16,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper/internal" "go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata" @@ -126,6 +127,24 @@ func (rec *ObsReport) EndMetricsOp( rec.endOp(receiverCtx, format, numReceivedPoints, err, pipeline.SignalMetrics) } +// StartProfilesOp is called when a request is received from a client. +// The returned context should be used in other calls to the obsreport functions +// dealing with the same receive operation. +func (rec *ObsReport) StartProfilesOp(operationCtx context.Context) context.Context { + return rec.startOp(operationCtx, internal.ReceiverProfilesOperationSuffix) +} + +// EndProfilesOp completes the receive operation that was started with +// StartProfilesOp. +func (rec *ObsReport) EndProfilesOp( + receiverCtx context.Context, + format string, + numReceivedProfileSamples int, + err error, +) { + rec.endOp(receiverCtx, format, numReceivedProfileSamples, err, xpipeline.SignalProfiles) +} + // startOp creates the span used to trace the operation. Returning // the updated context with the created span. 
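
With the metadata entries, generated counters, and attribute keys above in place, obsreport.go below exposes `StartProfilesOp`/`EndProfilesOp`, giving profile receivers the same accepted/refused/failed accounting as the other signals. A sketch of how a receiver might call them, assuming the same convention as the existing traces/metrics ops; the `"grpc"` transport and `"protobuf"` format strings are placeholder values:

```go
package example

import (
	"context"

	"go.opentelemetry.io/collector/consumer/xconsumer"
	"go.opentelemetry.io/collector/pdata/pprofile"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/receiver/receiverhelper"
)

// consumeWithObs brackets a ConsumeProfiles call with the new profile ops.
func consumeWithObs(ctx context.Context, set receiver.Settings, next xconsumer.Profiles, pd pprofile.Profiles) error {
	obsrep, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             set.ID,
		Transport:              "grpc",
		ReceiverCreateSettings: set,
	})
	if err != nil {
		return err
	}

	ctx = obsrep.StartProfilesOp(ctx)
	err = next.ConsumeProfiles(ctx, pd)
	// SampleCount feeds the otelcol_receiver_*_profile_samples counters
	// wired up earlier in this patch.
	obsrep.EndProfilesOp(ctx, "protobuf", pd.SampleCount(), err)
	return err
}
```
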
func (rec *ObsReport) startOp(receiverCtx context.Context, operationSuffix string) context.Context { @@ -211,6 +230,10 @@ func (rec *ObsReport) endOp( acceptedItemsKey = internal.AcceptedLogRecordsKey refusedItemsKey = internal.RefusedLogRecordsKey failedItemsKey = internal.FailedLogRecordsKey + case xpipeline.SignalProfiles: + acceptedItemsKey = internal.AcceptedProfileSamplesKey + refusedItemsKey = internal.RefusedProfileSamplesKey + failedItemsKey = internal.FailedProfileSamplesKey } span.SetAttributes( @@ -241,6 +264,10 @@ func (rec *ObsReport) recordMetrics(receiverCtx context.Context, signal pipeline acceptedMeasure = rec.telemetryBuilder.ReceiverAcceptedLogRecords refusedMeasure = rec.telemetryBuilder.ReceiverRefusedLogRecords failedMeasure = rec.telemetryBuilder.ReceiverFailedLogRecords + case xpipeline.SignalProfiles: + acceptedMeasure = rec.telemetryBuilder.ReceiverAcceptedProfileSamples + refusedMeasure = rec.telemetryBuilder.ReceiverRefusedProfileSamples + failedMeasure = rec.telemetryBuilder.ReceiverFailedProfileSamples } acceptedMeasure.Add(receiverCtx, int64(numAccepted), rec.otelAttrs) diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go b/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go index 048130f73c8..58c70c43672 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go @@ -146,9 +146,7 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun // The total number of generator calls will be equal to params.GenerateCount. for range concurrency { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for atomic.AddInt64(&generatedIndex, 1) <= int64(params.GenerateCount) { ids := params.Generator.Generate() require.NotEmpty(params.T, ids) @@ -161,7 +159,7 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun // generated data set. require.Empty(params.T, duplicates) } - }() + }) } // Wait until all generator goroutines are done. diff --git a/vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/receiver.go similarity index 75% rename from vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go rename to vendor/go.opentelemetry.io/collector/receiver/xreceiver/receiver.go index 8a912119ab2..9c0ce0e6a5a 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go +++ b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/receiver.go @@ -8,9 +8,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/internal/componentalias" "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/receiver/internal" ) // Profiles receiver receives profiles. @@ -44,18 +44,20 @@ type CreateProfilesFunc func(context.Context, receiver.Settings, component.Confi // FactoryOption apply changes to Factory. type FactoryOption interface { // applyOption applies the option. - applyOption(o *factoryOpts) + applyOption(o *factory) } // factoryOptionFunc is a FactoryOption created through a function. 
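
The xreceiver changes that follow fold away the old `factoryOpts` indirection: `FactoryOption` now mutates the concrete `*factory` directly, and the struct embeds `componentalias.TypeAliasHolder` so an alias set by an option survives into the wrapped `receiver.Factory`. A toy reduction of that functional-options shape, with made-up field names:

```go
package example

type factory struct {
	name  string
	alias string
}

// FactoryOption mirrors the interface form used in the diff: options apply
// straight to the factory, with no intermediate opts struct.
type FactoryOption interface{ applyOption(*factory) }

type factoryOptionFunc func(*factory)

func (f factoryOptionFunc) applyOption(o *factory) { f(o) }

// WithAlias is a stand-in for WithDeprecatedTypeAlias.
func WithAlias(alias string) FactoryOption {
	return factoryOptionFunc(func(o *factory) { o.alias = alias })
}

func NewFactory(name string, opts ...FactoryOption) *factory {
	f := &factory{name: name}
	for _, opt := range opts {
		opt.applyOption(f)
	}
	return f
}
```
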
-type factoryOptionFunc func(*factoryOpts) +type factoryOptionFunc func(*factory) -func (f factoryOptionFunc) applyOption(o *factoryOpts) { +func (f factoryOptionFunc) applyOption(o *factory) { f(o) } type factory struct { receiver.Factory + componentalias.TypeAliasHolder + opts []receiver.FactoryOption createProfilesFunc CreateProfilesFunc profilesStabilityLevel component.StabilityLevel } @@ -68,52 +70,56 @@ func (f *factory) CreateProfiles(ctx context.Context, set receiver.Settings, cfg if f.createProfilesFunc == nil { return nil, pipeline.ErrSignalNotSupported } - if set.ID.Type() != f.Type() { - return nil, internal.ErrIDMismatch(set.ID, f.Type()) + if err := componentalias.ValidateComponentType(f.Factory, set.ID); err != nil { + return nil, err } return f.createProfilesFunc(ctx, set, cfg, next) } -type factoryOpts struct { - opts []receiver.FactoryOption - *factory -} - // WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level. func WithTraces(createTraces receiver.CreateTracesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, receiver.WithTraces(createTraces, sl)) }) } // WithMetrics overrides the default "error not supported" implementation for Factory.CreateMetrics and the default "undefined" stability level. func WithMetrics(createMetrics receiver.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, receiver.WithMetrics(createMetrics, sl)) }) } // WithLogs overrides the default "error not supported" implementation for Factory.CreateLogs and the default "undefined" stability level. func WithLogs(createLogs receiver.CreateLogsFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.opts = append(o.opts, receiver.WithLogs(createLogs, sl)) }) } // WithProfiles overrides the default "error not supported" implementation for Factory.CreateProfiles and the default "undefined" stability level. func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption { - return factoryOptionFunc(func(o *factoryOpts) { + return factoryOptionFunc(func(o *factory) { o.profilesStabilityLevel = sl o.createProfilesFunc = createProfiles }) } -// NewFactory returns a Factory. +// WithDeprecatedTypeAlias configures a deprecated type alias for the receiver. Only one alias is supported per receiver. +// When the alias is used in configuration, a deprecation warning is automatically logged. +func WithDeprecatedTypeAlias(alias component.Type) FactoryOption { + return factoryOptionFunc(func(o *factory) { + o.SetDeprecatedAlias(alias) + }) +} + +// NewFactory creates a wrapped receiver.Factory with experimental capabilities. func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { - opts := factoryOpts{factory: &factory{}} + f := &factory{TypeAliasHolder: componentalias.NewTypeAliasHolder()} for _, opt := range options { - opt.applyOption(&opts) + opt.applyOption(f) } - opts.Factory = receiver.NewFactory(cfgType, createDefaultConfig, opts.opts...) - return opts.factory + f.Factory = receiver.NewFactory(cfgType, createDefaultConfig, f.opts...) 
+ f.Factory.(componentalias.TypeAliasHolder).SetDeprecatedAlias(f.DeprecatedAlias()) + return f } diff --git a/vendor/go.opentelemetry.io/collector/service/README.md b/vendor/go.opentelemetry.io/collector/service/README.md index 68d0ddb16a2..d01d313ac03 100644 --- a/vendor/go.opentelemetry.io/collector/service/README.md +++ b/vendor/go.opentelemetry.io/collector/service/README.md @@ -1,181 +1,11 @@ -# OpenTelemetry Collector Service - -## How to provide configuration? - -The `--config` flag accepts either a file path or values in the form of a config URI `":"`. -Currently, the OpenTelemetry Collector supports the following providers `scheme`: -- [file](../confmap/provider/fileprovider/provider.go) - Reads configuration from a file. E.g. `file:path/to/config.yaml`. -- [env](../confmap/provider/envprovider/provider.go) - Reads configuration from an environment variable. E.g. `env:MY_CONFIG_IN_AN_ENVVAR`. -- [yaml](../confmap/provider/yamlprovider/provider.go) - Reads configuration from yaml bytes. E.g. `yaml:exporters::debug::verbosity: detailed`. -- [http](../confmap/provider/httpprovider/provider.go) - Reads configuration from a HTTP URI. E.g. `http://www.example.com` - -For more technical details about how configuration is resolved you can read the [configuration resolving design](../confmap/README.md#configuration-resolving). - -### Single Config Source - -1. Simple local file: - - `./otelcorecol --config=examples/local/otel-config.yaml` - -2. Simple local file using the new URI format: - - `./otelcorecol --config=file:examples/local/otel-config.yaml` - -3. Config provided via an environment variable: - - `./otelcorecol --config=env:MY_CONFIG_IN_AN_ENVVAR` - - -### Multiple Config Sources - -1. Merge a `otel-config.yaml` file with the content of an environment variable `MY_OTHER_CONFIG` and use the merged result as the config: - - `./otelcorecol --config=file:examples/local/otel-config.yaml --config=env:MY_OTHER_CONFIG` - -2. Merge a `config.yaml` file with the content of a yaml bytes configuration (overwrites the `exporters::debug::verbosity` config) and use the content as the config: - - `./otelcorecol --config=file:examples/local/otel-config.yaml --config="yaml:exporters::debug::verbosity: normal"` - -### Embedding other configuration providers - -One configuration provider can also make references to other config providers, like the following: - -```yaml -receivers: - otlp: - protocols: - grpc: - -exporters: ${file:otlp-exporter.yaml} - -service: - extensions: [ ] - pipelines: - traces: - receivers: [ otlp ] - processors: [ ] - exporters: [ otlp ] -``` - -## How to override config properties? - -The `--set` flag allows to set arbitrary config property. The `--set` values are merged into the final configuration -after all the sources specified by the `--config` are resolved and merged. - -### The Format and Limitations of `--set` - -#### Simple property - -The `--set` option takes always one key/value pair, and it is used like this: `--set key=value`. The YAML equivalent of that is: - -```yaml -key: value -``` - -#### Complex nested keys - -Use dot (`.`) in the pair's name as key separator to reference nested map values. For example, `--set outer.inner=value` is translated into this: - -```yaml -outer: - inner: value -``` - -#### Multiple values - -To set multiple values specify multiple --set flags, so `--set a=b --set c=d` becomes: - -```yaml -a: b -c: d -``` - - -#### Array values - -Arrays can be expressed by enclosing values in `[]`. 
For example, `--set "key=[a, b, c]"` translates to: - -```yaml -key: - - a - - b - - c -``` - -#### Map values - -Maps can be expressed by enclosing values in `{}`. For example, `"--set "key={a: c}"` translates to: - -```yaml -key: - a: c -``` - -#### Limitations - -1. Does not support setting a key that contains a dot `.`. -2. Does not support setting a key that contains a equal sign `=`. -3. The configuration key separator inside the value part of the property is "::". For example `--set "name={a::b: c}"` is equivalent with `--set name.a.b=c`. - -## How to check components available in a distribution - -Use the sub command build-info. Below is an example: - -```bash - ./otelcorecol components -``` -Sample output: - -```yaml -buildinfo: - command: otelcorecol - description: Local OpenTelemetry Collector binary, testing only. - version: 0.62.1-dev -receivers: - - otlp -processors: - - memory_limiter -exporters: - - otlp - - otlphttp - - debug -extensions: - - zpages -``` - -## How to validate configuration file and return all errors without running collector - -```bash - ./otelcorecol validate --config=file:examples/local/otel-config.yaml -``` - -## How to examine the final configuration after resolving, parsing and validating? - -Use `print-config` in the default mode (`--mode=redacted`) and `--feature-gates=otelcol.printInitialConfig`: - -```bash - ./otelcorecol print-config --config=file:examples/local/otel-config.yaml -``` - -Note that by default the configuration will only print when it is -valid, and that sensitive information will be redacted. To print a -potentially invalid configuration, use `--validate=false`. - -## How to examine the final configuration including sensitive fields? - -Use `print-config` with `--mode=unredacted` and `--feature-gates=otelcol.printInitialConfig`: - -```bash - ./otelcorecol print-config --mode=unredacted --config=file:examples/local/otel-config.yaml -``` - -## How to print the final configuration in JSON format? - -Use `print-config` with `--format=json` and -`--feature-gates=otelcol.printInitialConfig`. Note that JSON format is -considered unstable. 
- -```bash - ./otelcorecol print-config --format=json --config=file:examples/local/otel-config.yaml -``` - + +# Service +| Status | | +| ------------- |-----------| +| Stability | [development]: traces, metrics, logs, profiles | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Apkg%2Fservice%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Apkg%2Fservice) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Apkg%2Fservice%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Apkg%2Fservice) | + +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development +[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib + diff --git a/vendor/go.opentelemetry.io/collector/service/documentation.md b/vendor/go.opentelemetry.io/collector/service/documentation.md index de9fa2e70f7..11b3adced66 100644 --- a/vendor/go.opentelemetry.io/collector/service/documentation.md +++ b/vendor/go.opentelemetry.io/collector/service/documentation.md @@ -10,94 +10,106 @@ The following telemetry is emitted by this component. Number of items passed to the connector. -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {item} | Sum | Int | true | +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {item} | Sum | Int | true | Development | ### otelcol.connector.produced.items Number of items emitted from the connector. -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {item} | Sum | Int | true | +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {item} | Sum | Int | true | Development | ### otelcol.exporter.consumed.items Number of items passed to the exporter. 
-| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {item} | Sum | Int | true | +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {item} | Sum | Int | true | Development | ### otelcol_process_cpu_seconds -Total CPU user and system time in seconds [alpha] +Total CPU user and system time in seconds | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| s | Sum | Double | true | alpha | +| s | Sum | Double | true | Alpha | ### otelcol_process_memory_rss -Total physical memory (resident set size) [alpha] +Total physical memory (resident set size) | Unit | Metric Type | Value Type | Stability | | ---- | ----------- | ---------- | --------- | -| By | Gauge | Int | alpha | +| By | Gauge | Int | Alpha | ### otelcol_process_runtime_heap_alloc_bytes -Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [alpha] +Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') | Unit | Metric Type | Value Type | Stability | | ---- | ----------- | ---------- | --------- | -| By | Gauge | Int | alpha | +| By | Gauge | Int | Alpha | ### otelcol_process_runtime_total_alloc_bytes -Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') [alpha] +Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| By | Sum | Int | true | alpha | +| By | Sum | Int | true | Alpha | ### otelcol_process_runtime_total_sys_memory_bytes -Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') [alpha] +Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') | Unit | Metric Type | Value Type | Stability | | ---- | ----------- | ---------- | --------- | -| By | Gauge | Int | alpha | +| By | Gauge | Int | Alpha | ### otelcol_process_uptime -Uptime of the process [alpha] +Uptime of the process | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | -| s | Sum | Double | true | alpha | +| s | Sum | Double | true | Alpha | ### otelcol.processor.consumed.items Number of items passed to the processor. -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {item} | Sum | Int | true | +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {item} | Sum | Int | true | Development | ### otelcol.processor.produced.items Number of items emitted from the processor. -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {item} | Sum | Int | true | +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {item} | Sum | Int | true | Development | ### otelcol.receiver.produced.items Number of items emitted from the receiver. 
-| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {item} | Sum | Int | true | +| Unit | Metric Type | Value Type | Monotonic | Stability | +| ---- | ----------- | ---------- | --------- | --------- | +| {item} | Sum | Int | true | Development | + +## Feature Gates + +This component has the following feature gates: + +| Feature Gate | Stage | Description | From Version | To Version | Reference | +| ------------ | ----- | ----------- | ------------ | ---------- | --------- | +| `service.AllowNoPipelines` | alpha | Allow starting the Collector without starting any pipelines. | v0.122.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector/pull/12613) | +| `service.profilesSupport` | alpha | Controls whether profiles support can be enabled | v0.112.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector/pull/11477) | +| `telemetry.UseLocalHostAsDefaultMetricsAddress` | beta | Controls whether default Prometheus metrics server use localhost as the default host for their endpoints | v0.111.0 | N/A | [Link](https://github.com/open-telemetry/opentelemetry-collector/pull/11251) | + +For more information about feature gates, see the [Feature Gates](https://github.com/open-telemetry/opentelemetry-collector/blob/main/featuregate/README.md) documentation. diff --git a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go index 4786eed7126..cc9265ce7dd 100644 --- a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go +++ b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go @@ -17,10 +17,9 @@ import ( "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/extension/extensioncapabilities" - "go.opentelemetry.io/collector/internal/telemetry" - "go.opentelemetry.io/collector/internal/telemetry/componentattribute" "go.opentelemetry.io/collector/service/internal/attribute" "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/componentattribute" "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/internal/zpages" ) @@ -40,8 +39,8 @@ type Extensions struct { func (bes *Extensions) Start(ctx context.Context, host component.Host) error { bes.telemetry.Logger.Info("Starting extensions...") for _, extID := range bes.extensionIDs { - extLogger := componentattribute.ZapLoggerWithAttributes(bes.telemetry.Logger, - *attribute.Extension(extID).Set()) + extLogger := componentattribute.LoggerWithAttributes(bes.telemetry.Logger, + attribute.Extension(extID).Set().ToSlice()) extLogger.Info("Extension is starting...") instanceID := bes.instanceIDs[extID] ext := bes.extMap[extID] @@ -214,7 +213,7 @@ func New(ctx context.Context, set Settings, cfg Config, options ...Option) (*Ext instanceID := componentstatus.NewInstanceID(extID, component.KindExtension) extSet := extension.Settings{ ID: extID, - TelemetrySettings: telemetry.WithAttributeSet(set.Telemetry, *attribute.Extension(extID).Set()), + TelemetrySettings: componentattribute.TelemetrySettingsWithAttributes(set.Telemetry, *attribute.Extension(extID).Set()), BuildInfo: set.BuildInfo, } diff --git a/vendor/go.opentelemetry.io/collector/service/hostcapabilities/interfaces.go b/vendor/go.opentelemetry.io/collector/service/hostcapabilities/interfaces.go index 6063cdee7d9..4df360994fb 100644 --- 
a/vendor/go.opentelemetry.io/collector/service/hostcapabilities/interfaces.go +++ b/vendor/go.opentelemetry.io/collector/service/hostcapabilities/interfaces.go @@ -21,6 +21,7 @@ type ModuleInfo interface { // ExposeExporters is an interface that may be implemented by the host to provide // access to the exporters that were used to build the host. +// // Deprecated: [v0.121.0] Will be removed in Service 1.0. // See: https://github.com/open-telemetry/opentelemetry-collector/issues/7370 for service 1.0 type ExposeExporters interface { diff --git a/vendor/go.opentelemetry.io/collector/service/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/collector/service/internal/attribute/attribute.go index 107c38f82dd..97c467df3b4 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/attribute/attribute.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/internal/telemetry/componentattribute" + "go.opentelemetry.io/collector/internal/telemetry" "go.opentelemetry.io/collector/pipeline" ) @@ -49,55 +49,55 @@ func (a Attributes) ID() int64 { func Receiver(pipelineType pipeline.Signal, id component.ID) Attributes { return newAttributes( - attribute.String(componentattribute.ComponentKindKey, strings.ToLower(component.KindReceiver.String())), - attribute.String(componentattribute.SignalKey, pipelineType.String()), - attribute.String(componentattribute.ComponentIDKey, id.String()), + attribute.String(telemetry.ComponentKindKey, strings.ToLower(component.KindReceiver.String())), + attribute.String(telemetry.SignalKey, pipelineType.String()), + attribute.String(telemetry.ComponentIDKey, id.String()), ) } func Processor(pipelineID pipeline.ID, id component.ID) Attributes { return newAttributes( - attribute.String(componentattribute.ComponentKindKey, strings.ToLower(component.KindProcessor.String())), - attribute.String(componentattribute.SignalKey, pipelineID.Signal().String()), - attribute.String(componentattribute.PipelineIDKey, pipelineID.String()), - attribute.String(componentattribute.ComponentIDKey, id.String()), + attribute.String(telemetry.ComponentKindKey, strings.ToLower(component.KindProcessor.String())), + attribute.String(telemetry.SignalKey, pipelineID.Signal().String()), + attribute.String(telemetry.PipelineIDKey, pipelineID.String()), + attribute.String(telemetry.ComponentIDKey, id.String()), ) } func Exporter(pipelineType pipeline.Signal, id component.ID) Attributes { return newAttributes( - attribute.String(componentattribute.ComponentKindKey, strings.ToLower(component.KindExporter.String())), - attribute.String(componentattribute.SignalKey, pipelineType.String()), - attribute.String(componentattribute.ComponentIDKey, id.String()), + attribute.String(telemetry.ComponentKindKey, strings.ToLower(component.KindExporter.String())), + attribute.String(telemetry.SignalKey, pipelineType.String()), + attribute.String(telemetry.ComponentIDKey, id.String()), ) } func Connector(exprPipelineType, rcvrPipelineType pipeline.Signal, id component.ID) Attributes { return newAttributes( - attribute.String(componentattribute.ComponentKindKey, strings.ToLower(component.KindConnector.String())), - attribute.String(componentattribute.SignalKey, exprPipelineType.String()), - attribute.String(componentattribute.SignalOutputKey, rcvrPipelineType.String()), - attribute.String(componentattribute.ComponentIDKey, id.String()), + 
attribute.String(telemetry.ComponentKindKey, strings.ToLower(component.KindConnector.String())), + attribute.String(telemetry.SignalKey, exprPipelineType.String()), + attribute.String(telemetry.SignalOutputKey, rcvrPipelineType.String()), + attribute.String(telemetry.ComponentIDKey, id.String()), ) } func Extension(id component.ID) Attributes { return newAttributes( - attribute.String(componentattribute.ComponentKindKey, strings.ToLower(component.KindExtension.String())), - attribute.String(componentattribute.ComponentIDKey, id.String()), + attribute.String(telemetry.ComponentKindKey, strings.ToLower(component.KindExtension.String())), + attribute.String(telemetry.ComponentIDKey, id.String()), ) } func Capabilities(pipelineID pipeline.ID) Attributes { return newAttributes( - attribute.String(componentattribute.ComponentKindKey, capabiltiesKind), - attribute.String(componentattribute.PipelineIDKey, pipelineID.String()), + attribute.String(telemetry.ComponentKindKey, capabiltiesKind), + attribute.String(telemetry.PipelineIDKey, pipelineID.String()), ) } func Fanout(pipelineID pipeline.ID) Attributes { return newAttributes( - attribute.String(componentattribute.ComponentKindKey, fanoutKind), - attribute.String(componentattribute.PipelineIDKey, pipelineID.String()), + attribute.String(telemetry.ComponentKindKey, fanoutKind), + attribute.String(telemetry.PipelineIDKey, pipelineID.String()), ) } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go index 4e332b3012a..fd773d7b3b0 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go @@ -5,10 +5,12 @@ package builders // import "go.opentelemetry.io/collector/service/internal/build import ( "errors" + "fmt" "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/internal/componentalias" ) var ( @@ -26,3 +28,15 @@ func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { logger.Info(sl.LogMessage()) } } + +// logDeprecatedTypeAlias checks if the provided type is a deprecated alias and logs a warning if so. 
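
`logDeprecatedTypeAlias` below is the runtime half of the alias mechanism: every builder calls it before creating a component, and it warns only when the configured type is the alias rather than the canonical name. A sketch of the author-facing side, using the `WithDeprecatedTypeAlias` option added earlier in this patch; the component type names here are invented for illustration:

```go
package example

import (
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/receiver/xreceiver"
)

// NewFactory registers "oldrecv" as a deprecated alias of "newrecv". Configs
// that still say `oldrecv:` keep loading, and the builders below log
// `"oldrecv" alias is deprecated; use "newrecv" instead` at create time.
func NewFactory() xreceiver.Factory {
	return xreceiver.NewFactory(
		component.MustNewType("newrecv"),
		createDefaultConfig,
		xreceiver.WithDeprecatedTypeAlias(component.MustNewType("oldrecv")),
	)
}

func createDefaultConfig() component.Config { return &struct{}{} }
```
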
+func logDeprecatedTypeAlias(logger *zap.Logger, factory component.Factory, usedType component.Type) { + tah, ok := factory.(componentalias.TypeAliasHolder) + if !ok { + return + } + alias := tah.DeprecatedAlias() + if alias.String() != "" && usedType == alias { + logger.Warn(fmt.Sprintf("%q alias is deprecated; use %q instead", alias.String(), factory.Type().String())) + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go index dffc3bb0f65..27519be1fd8 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go @@ -47,6 +47,7 @@ func (b *ConnectorBuilder) CreateTracesToTraces(ctx context.Context, set connect return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.TracesToTracesStability()) return f.CreateTracesToTraces(ctx, set, cfg, next) } @@ -66,6 +67,7 @@ func (b *ConnectorBuilder) CreateTracesToMetrics(ctx context.Context, set connec return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.TracesToMetricsStability()) return f.CreateTracesToMetrics(ctx, set, cfg, next) } @@ -85,6 +87,7 @@ func (b *ConnectorBuilder) CreateTracesToLogs(ctx context.Context, set connector return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.TracesToLogsStability()) return f.CreateTracesToLogs(ctx, set, cfg, next) } @@ -109,6 +112,7 @@ func (b *ConnectorBuilder) CreateTracesToProfiles(ctx context.Context, set conne return nil, errDataTypes(set.ID, pipeline.SignalTraces, xpipeline.SignalProfiles) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.TracesToProfilesStability()) return f.CreateTracesToProfiles(ctx, set, cfg, next) } @@ -128,6 +132,7 @@ func (b *ConnectorBuilder) CreateMetricsToTraces(ctx context.Context, set connec return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.MetricsToTracesStability()) return f.CreateMetricsToTraces(ctx, set, cfg, next) } @@ -147,6 +152,7 @@ func (b *ConnectorBuilder) CreateMetricsToMetrics(ctx context.Context, set conne return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.MetricsToMetricsStability()) return f.CreateMetricsToMetrics(ctx, set, cfg, next) } @@ -166,6 +172,7 @@ func (b *ConnectorBuilder) CreateMetricsToLogs(ctx context.Context, set connecto return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.MetricsToLogsStability()) return f.CreateMetricsToLogs(ctx, set, cfg, next) } @@ -190,6 +197,7 @@ func (b *ConnectorBuilder) CreateMetricsToProfiles(ctx context.Context, set conn return nil, errDataTypes(set.ID, pipeline.SignalMetrics, xpipeline.SignalProfiles) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.MetricsToProfilesStability()) return f.CreateMetricsToProfiles(ctx, set, cfg, next) } @@ -209,6 +217,7 @@ func (b 
*ConnectorBuilder) CreateLogsToTraces(ctx context.Context, set connector return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.LogsToTracesStability()) return f.CreateLogsToTraces(ctx, set, cfg, next) } @@ -228,6 +237,7 @@ func (b *ConnectorBuilder) CreateLogsToMetrics(ctx context.Context, set connecto return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.LogsToMetricsStability()) return f.CreateLogsToMetrics(ctx, set, cfg, next) } @@ -247,6 +257,7 @@ func (b *ConnectorBuilder) CreateLogsToLogs(ctx context.Context, set connector.S return nil, fmt.Errorf("connector factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.LogsToLogsStability()) return f.CreateLogsToLogs(ctx, set, cfg, next) } @@ -271,6 +282,7 @@ func (b *ConnectorBuilder) CreateLogsToProfiles(ctx context.Context, set connect return nil, errDataTypes(set.ID, pipeline.SignalLogs, xpipeline.SignalProfiles) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.LogsToProfilesStability()) return f.CreateLogsToProfiles(ctx, set, cfg, next) } @@ -295,6 +307,7 @@ func (b *ConnectorBuilder) CreateProfilesToTraces(ctx context.Context, set conne return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalTraces) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.ProfilesToTracesStability()) return f.CreateProfilesToTraces(ctx, set, cfg, next) } @@ -319,6 +332,7 @@ func (b *ConnectorBuilder) CreateProfilesToMetrics(ctx context.Context, set conn return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalMetrics) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.ProfilesToMetricsStability()) return f.CreateProfilesToMetrics(ctx, set, cfg, next) } @@ -343,6 +357,7 @@ func (b *ConnectorBuilder) CreateProfilesToLogs(ctx context.Context, set connect return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalLogs) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.ProfilesToLogsStability()) return f.CreateProfilesToLogs(ctx, set, cfg, next) } @@ -367,6 +382,7 @@ func (b *ConnectorBuilder) CreateProfilesToProfiles(ctx context.Context, set con return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, xpipeline.SignalProfiles) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.ProfilesToProfilesStability()) return f.CreateProfilesToProfiles(ctx, set, cfg, next) } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go index bb2367d22b7..9520eb4eddb 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go @@ -37,6 +37,7 @@ func (b *ExporterBuilder) CreateTraces(ctx context.Context, set exporter.Setting return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.TracesStability()) return f.CreateTraces(ctx, set, cfg) } @@ -53,6 +54,7 @@ func (b *ExporterBuilder) CreateMetrics(ctx context.Context, set exporter.Settin 
return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.MetricsStability()) return f.CreateMetrics(ctx, set, cfg) } @@ -69,6 +71,7 @@ func (b *ExporterBuilder) CreateLogs(ctx context.Context, set exporter.Settings) return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.LogsStability()) return f.CreateLogs(ctx, set, cfg) } @@ -90,6 +93,7 @@ func (b *ExporterBuilder) CreateProfiles(ctx context.Context, set exporter.Setti return nil, pipeline.ErrSignalNotSupported } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.ProfilesStability()) return f.CreateProfiles(ctx, set, cfg) } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go index 35c2bead4aa..c7f37937048 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go @@ -43,12 +43,8 @@ func (b *ExtensionBuilder) Create(ctx context.Context, set extension.Settings) ( return nil, fmt.Errorf("extension factory not available for: %q", set.ID) } - sl := f.Stability() - if sl >= component.StabilityLevelAlpha { - set.Logger.Debug(sl.LogMessage()) - } else { - set.Logger.Info(sl.LogMessage()) - } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) + logStabilityLevel(set.Logger, f.Stability()) return f.Create(ctx, set, cfg) } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go index 8e19bed3ea9..023f41d17a5 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go @@ -43,6 +43,7 @@ func (b *ProcessorBuilder) CreateTraces(ctx context.Context, set processor.Setti return nil, fmt.Errorf("processor factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.TracesStability()) return f.CreateTraces(ctx, set, cfg, next) } @@ -62,6 +63,7 @@ func (b *ProcessorBuilder) CreateMetrics(ctx context.Context, set processor.Sett return nil, fmt.Errorf("processor factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.MetricsStability()) return f.CreateMetrics(ctx, set, cfg, next) } @@ -81,6 +83,7 @@ func (b *ProcessorBuilder) CreateLogs(ctx context.Context, set processor.Setting return nil, fmt.Errorf("processor factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.LogsStability()) return f.CreateLogs(ctx, set, cfg, next) } @@ -104,6 +107,7 @@ func (b *ProcessorBuilder) CreateProfiles(ctx context.Context, set processor.Set if !ok { return nil, pipeline.ErrSignalNotSupported } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.ProfilesStability()) return f.CreateProfiles(ctx, set, cfg, next) } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go index 2568392622b..2277b1b4ca3 100644 --- 
a/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go @@ -44,6 +44,7 @@ func (b *ReceiverBuilder) CreateTraces(ctx context.Context, set receiver.Setting return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.TracesStability()) return f.CreateTraces(ctx, set, cfg, next) } @@ -63,6 +64,7 @@ func (b *ReceiverBuilder) CreateMetrics(ctx context.Context, set receiver.Settin return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.MetricsStability()) return f.CreateMetrics(ctx, set, cfg, next) } @@ -82,6 +84,7 @@ func (b *ReceiverBuilder) CreateLogs(ctx context.Context, set receiver.Settings, return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.LogsStability()) return f.CreateLogs(ctx, set, cfg, next) } @@ -106,6 +109,7 @@ func (b *ReceiverBuilder) CreateProfiles(ctx context.Context, set receiver.Setti return nil, pipeline.ErrSignalNotSupported } + logDeprecatedTypeAlias(set.Logger, f, set.ID.Type()) logStabilityLevel(set.Logger, f.ProfilesStability()) return f.CreateProfiles(ctx, set, cfg, next) } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/logger_zap.go b/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/logger_zap.go new file mode 100644 index 00000000000..a9c52eb4577 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/logger_zap.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentattribute // import "go.opentelemetry.io/collector/service/internal/componentattribute" + +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "go.opentelemetry.io/collector/internal/telemetry" +) + +// This wrapper around zapcore.Field tells the Zap -> OTel bridge that the field +// should be turned into an instrumentation scope instead of a set of log record attributes. +type scopeAttributesField struct { + fields []zapcore.Field + attrs []attribute.KeyValue +} + +var _ zapcore.ObjectMarshaler = scopeAttributesField{} + +func (saf scopeAttributesField) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, field := range saf.fields { + field.AddTo(enc) + } + return nil +} + +func makeScopeField(attrs []attribute.KeyValue) zap.Field { + return zap.Inline(scopeAttributesField{ + fields: telemetry.ToZapFields(attrs), + attrs: attrs, + }) +} + +func ExtractLogScopeAttributes(field zap.Field) ([]attribute.KeyValue, bool) { + if field.Type != zapcore.InlineMarshalerType { + return nil, false + } + saf, ok := field.Interface.(scopeAttributesField) + if !ok { + return nil, false + } + return saf.attrs, true +} + +type coreWithAttributes struct { + zapcore.Core + sourceCore zapcore.Core + attrs []attribute.KeyValue + withFields []zap.Field +} + +var _ zapcore.Core = coreWithAttributes{} + +func (cwa coreWithAttributes) With(fields []zapcore.Field) zapcore.Core { + cwa.withFields = append(cwa.withFields, fields...) 
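// The builder hunks above all gain the same logDeprecatedTypeAlias +
// logStabilityLevel pair. logStabilityLevel itself is not shown in this diff,
// but the inline code removed from extension.go suggests its shape; a minimal
// sketch reconstructed from that hunk (the body is an assumption, imports
// from go.uber.org/zap and the collector's component package):
func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) {
	// Alpha and above are routine, so keep the message at debug;
	// less mature or deprecated levels are surfaced at info.
	if sl >= component.StabilityLevelAlpha {
		logger.Debug(sl.LogMessage())
	} else {
		logger.Info(sl.LogMessage())
	}
}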
+ cwa.Core = cwa.Core.With(fields) + return cwa +} + +func LoggerWithAttributes(logger *zap.Logger, attrs []attribute.KeyValue) *zap.Logger { + return logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core { + return coreWithAttributes{ + Core: c.With([]zap.Field{makeScopeField(attrs)}), + sourceCore: c, + attrs: attrs, + } + })) +} + +func (cwa coreWithAttributes) DropInjectedAttributes(droppedAttrs ...string) zapcore.Core { + cwa.attrs = slices.DeleteFunc(slices.Clone(cwa.attrs), func(kv attribute.KeyValue) bool { + return slices.Contains(droppedAttrs, string(kv.Key)) + }) + cwa.Core = cwa.sourceCore.With(append([]zap.Field{makeScopeField(cwa.attrs)}, cwa.withFields...)) + return cwa +} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go b/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/meter_provider.go similarity index 68% rename from vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go rename to vendor/go.opentelemetry.io/collector/service/internal/componentattribute/meter_provider.go index d17732dde58..2d6646dc6d3 100644 --- a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/meter_provider.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" +package componentattribute // import "go.opentelemetry.io/collector/service/internal/componentattribute" import ( "slices" @@ -15,17 +15,6 @@ type meterProviderWithAttributes struct { attrs []attribute.KeyValue } -// MeterProviderWithAttributes creates a MeterProvider with a new set of injected instrumentation scope attributes. -func MeterProviderWithAttributes(mp metric.MeterProvider, attrs attribute.Set) metric.MeterProvider { - if mpwa, ok := mp.(meterProviderWithAttributes); ok { - mp = mpwa.MeterProvider - } - return meterProviderWithAttributes{ - MeterProvider: mp, - attrs: attrs.ToSlice(), - } -} - func (mpwa meterProviderWithAttributes) Meter(name string, opts ...metric.MeterOption) metric.Meter { conf := metric.NewMeterConfig(opts...) attrSet := conf.InstrumentationAttributes() @@ -35,3 +24,12 @@ func (mpwa meterProviderWithAttributes) Meter(name string, opts ...metric.MeterO opts = append(opts, metric.WithInstrumentationAttributes(newAttrs...)) return mpwa.MeterProvider.Meter(name, opts...) 
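// makeScopeField above smuggles the attribute slice through Zap as an inline
// ObjectMarshaler so that a Zap -> OTel bridge can recover it by probing each
// field with ExtractLogScopeAttributes. A sketch of that consumer side
// (assumed usage, not code from this diff):
func splitScopeAttrs(fields []zapcore.Field) (scope []attribute.KeyValue, rest []zapcore.Field) {
	for _, f := range fields {
		if attrs, ok := ExtractLogScopeAttributes(f); ok {
			// These become instrumentation scope attributes on the
			// emitted record, not log record attributes.
			scope = append(scope, attrs...)
			continue
		}
		rest = append(rest, f)
	}
	return scope, rest
}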
} + +func (mpwa meterProviderWithAttributes) DropInjectedAttributes(droppedAttrs ...string) metric.MeterProvider { + return meterProviderWithAttributes{ + MeterProvider: mpwa.MeterProvider, + attrs: slices.DeleteFunc(slices.Clone(mpwa.attrs), func(kv attribute.KeyValue) bool { + return slices.Contains(droppedAttrs, string(kv.Key)) + }), + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/telemetry.go new file mode 100644 index 00000000000..cea5df76450 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/telemetry.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentattribute // import "go.opentelemetry.io/collector/service/internal/componentattribute" + +import ( + "go.opentelemetry.io/otel/attribute" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/internal/telemetry" +) + +func TelemetrySettingsWithAttributes(ts component.TelemetrySettings, attrSet attribute.Set) component.TelemetrySettings { + attrs := attrSet.ToSlice() + ts.Logger = LoggerWithAttributes(ts.Logger, attrs) + ts.TracerProvider = tracerProviderWithAttributes{ + TracerProvider: ts.TracerProvider, + attrs: attrs, + } + if telemetry.NewPipelineTelemetryGate.IsEnabled() { + ts.MeterProvider = meterProviderWithAttributes{ + MeterProvider: ts.MeterProvider, + attrs: attrs, + } + } + return ts +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/tracer_provider.go b/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/tracer_provider.go new file mode 100644 index 00000000000..79ce3ca9766 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/componentattribute/tracer_provider.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentattribute // import "go.opentelemetry.io/collector/service/internal/componentattribute" + +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +type tracerProviderWithAttributes struct { + trace.TracerProvider + attrs []attribute.KeyValue +} + +func (tpwa tracerProviderWithAttributes) Tracer(name string, options ...trace.TracerOption) trace.Tracer { + conf := trace.NewTracerConfig(options...) + attrSet := conf.InstrumentationAttributes() + // prepend our attributes so they can be overwritten + newAttrs := append(slices.Clone(tpwa.attrs), attrSet.ToSlice()...) + // append our attribute set option to overwrite the old one + options = append(options, trace.WithInstrumentationAttributes(newAttrs...)) + return tpwa.TracerProvider.Tracer(name, options...) 
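// The Meter and Tracer wrappers resolve key collisions the same way: injected
// attributes are prepended, and attribute.NewSet's last-value-wins
// de-duplication lets a component's own instrumentation attributes override
// them. A sketch of the merge and of the DropInjectedAttributes filter
// (key names illustrative):
func mergeAndDropDemo() {
	injected := []attribute.KeyValue{
		attribute.String("otelcol.component.id", "otlp"),
		attribute.String("otelcol.signal", "traces"),
	}
	caller := []attribute.KeyValue{attribute.String("otelcol.signal", "metrics")}

	// Merge as Meter/Tracer above do: injected first, caller second, so the
	// caller's "metrics" wins for otelcol.signal in the resulting set.
	merged := attribute.NewSet(append(slices.Clone(injected), caller...)...)
	_ = merged

	// Filter as the DropInjectedAttributes methods do.
	kept := slices.DeleteFunc(slices.Clone(injected), func(kv attribute.KeyValue) bool {
		return slices.Contains([]string{"otelcol.signal"}, string(kv.Key))
	})
	_ = kept // only otelcol.component.id remains
}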
+} + +func (tpwa tracerProviderWithAttributes) Unwrap() trace.TracerProvider { + return tpwa.TracerProvider +} + +func (tpwa tracerProviderWithAttributes) DropInjectedAttributes(droppedAttrs ...string) trace.TracerProvider { + return tracerProviderWithAttributes{ + TracerProvider: tpwa.TracerProvider, + attrs: slices.DeleteFunc(slices.Clone(tpwa.attrs), func(kv attribute.KeyValue) bool { + return slices.Contains(droppedAttrs, string(kv.Key)) + }), + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go index 837033599e1..426e88b9796 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go @@ -13,12 +13,12 @@ import ( "go.opentelemetry.io/collector/connector/xconnector" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/xconsumer" - "go.opentelemetry.io/collector/internal/telemetry" "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/pipeline/xpipeline" "go.opentelemetry.io/collector/service/internal/attribute" "go.opentelemetry.io/collector/service/internal/builders" "go.opentelemetry.io/collector/service/internal/capabilityconsumer" + "go.opentelemetry.io/collector/service/internal/componentattribute" "go.opentelemetry.io/collector/service/internal/metadata" "go.opentelemetry.io/collector/service/internal/obsconsumer" "go.opentelemetry.io/collector/service/internal/refconsumer" @@ -59,7 +59,7 @@ func (n *connectorNode) buildComponent( ) error { set := connector.Settings{ ID: n.componentID, - TelemetrySettings: telemetry.WithAttributeSet(tel, *n.Set()), + TelemetrySettings: componentattribute.TelemetrySettingsWithAttributes(tel, *n.Set()), BuildInfo: info, } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go index d21f860918f..42d5a648404 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go @@ -11,11 +11,11 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/internal/telemetry" "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/pipeline/xpipeline" "go.opentelemetry.io/collector/service/internal/attribute" "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/componentattribute" "go.opentelemetry.io/collector/service/internal/metadata" "go.opentelemetry.io/collector/service/internal/obsconsumer" "go.opentelemetry.io/collector/service/internal/refconsumer" @@ -53,7 +53,7 @@ func (n *exporterNode) buildComponent( ) error { set := exporter.Settings{ ID: n.componentID, - TelemetrySettings: telemetry.WithAttributeSet(tel, *n.Set()), + TelemetrySettings: componentattribute.TelemetrySettingsWithAttributes(tel, *n.Set()), BuildInfo: info, } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go index ec259c7189d..1b008cb44f7 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go @@ -619,7 +619,7 @@ func connectorStability(f 
connector.Factory, expType, recType pipeline.Signal) c var ( _ component.Host = (*HostWrapper)(nil) _ componentstatus.Reporter = (*HostWrapper)(nil) - _ hostcapabilities.ExposeExporters = (*HostWrapper)(nil) + _ hostcapabilities.ExposeExporters = (*HostWrapper)(nil) //nolint:staticcheck // SA1019 ) type HostWrapper struct { diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go index 6f5f6bde356..2fe527a9fff 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go @@ -24,7 +24,7 @@ import ( var ( _ component.Host = (*Host)(nil) _ hostcapabilities.ModuleInfo = (*Host)(nil) - _ hostcapabilities.ExposeExporters = (*Host)(nil) + _ hostcapabilities.ExposeExporters = (*Host)(nil) //nolint:staticcheck // SA1019 _ hostcapabilities.ComponentFactory = (*Host)(nil) ) diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go index 9a8f5ceb046..c46eddbbccb 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go @@ -10,12 +10,12 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/xconsumer" - "go.opentelemetry.io/collector/internal/telemetry" "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/pipeline/xpipeline" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/service/internal/attribute" "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/componentattribute" "go.opentelemetry.io/collector/service/internal/metadata" "go.opentelemetry.io/collector/service/internal/obsconsumer" "go.opentelemetry.io/collector/service/internal/refconsumer" @@ -53,7 +53,7 @@ func (n *processorNode) buildComponent(ctx context.Context, ) error { set := processor.Settings{ ID: n.componentID, - TelemetrySettings: telemetry.WithAttributeSet(tel, *n.Set()), + TelemetrySettings: componentattribute.TelemetrySettingsWithAttributes(tel, *n.Set()), BuildInfo: info, } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go index 76ca2bae81b..e1c23fae458 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go @@ -11,12 +11,12 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/internal/fanoutconsumer" - "go.opentelemetry.io/collector/internal/telemetry" "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/pipeline/xpipeline" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/service/internal/attribute" "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/componentattribute" "go.opentelemetry.io/collector/service/internal/metadata" "go.opentelemetry.io/collector/service/internal/obsconsumer" ) @@ -46,7 +46,7 @@ func (n *receiverNode) buildComponent(ctx context.Context, ) error { set := receiver.Settings{ ID: n.componentID, - TelemetrySettings: telemetry.WithAttributeSet(tel, 
*n.Set()), + TelemetrySettings: componentattribute.TelemetrySettingsWithAttributes(tel, *n.Set()), BuildInfo: info, } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_feature_gates.go b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_feature_gates.go new file mode 100644 index 00000000000..ac686925e2a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_feature_gates.go @@ -0,0 +1,31 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/featuregate" +) + +var ServiceAllowNoPipelinesFeatureGate = featuregate.GlobalRegistry().MustRegister( + "service.AllowNoPipelines", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("Allow starting the Collector without starting any pipelines."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/pull/12613"), + featuregate.WithRegisterFromVersion("v0.122.0"), +) + +var ServiceProfilesSupportFeatureGate = featuregate.GlobalRegistry().MustRegister( + "service.profilesSupport", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("Controls whether profiles support can be enabled"), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/pull/11477"), + featuregate.WithRegisterFromVersion("v0.112.0"), +) + +var TelemetryUseLocalHostAsDefaultMetricsAddressFeatureGate = featuregate.GlobalRegistry().MustRegister( + "telemetry.UseLocalHostAsDefaultMetricsAddress", + featuregate.StageBeta, + featuregate.WithRegisterDescription("Controls whether default Prometheus metrics server use localhost as the default host for their endpoints"), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/pull/11251"), + featuregate.WithRegisterFromVersion("v0.111.0"), +) diff --git a/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go index 9bdb0665c20..381f25019ba 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go @@ -189,109 +189,109 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...Teleme var err, errs error builder.ConnectorConsumedItems, err = builder.meter.Int64Counter( "otelcol.connector.consumed.items", - metric.WithDescription("Number of items passed to the connector."), + metric.WithDescription("Number of items passed to the connector. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ConnectorConsumedSize, err = builder.meter.Int64Counter( "otelcol.connector.consumed.size", - metric.WithDescription("Size of items passed to the connector, based on ProtoMarshaler.Sizer."), + metric.WithDescription("Size of items passed to the connector, based on ProtoMarshaler.Sizer. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ConnectorProducedItems, err = builder.meter.Int64Counter( "otelcol.connector.produced.items", - metric.WithDescription("Number of items emitted from the connector."), + metric.WithDescription("Number of items emitted from the connector. 
[Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ConnectorProducedSize, err = builder.meter.Int64Counter( "otelcol.connector.produced.size", - metric.WithDescription("Size of items emitted from the connector, based on ProtoMarshaler.Sizer."), + metric.WithDescription("Size of items emitted from the connector, based on ProtoMarshaler.Sizer. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ExporterConsumedItems, err = builder.meter.Int64Counter( "otelcol.exporter.consumed.items", - metric.WithDescription("Number of items passed to the exporter."), + metric.WithDescription("Number of items passed to the exporter. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ExporterConsumedSize, err = builder.meter.Int64Counter( "otelcol.exporter.consumed.size", - metric.WithDescription("Size of items passed to the exporter, based on ProtoMarshaler.Sizer."), + metric.WithDescription("Size of items passed to the exporter, based on ProtoMarshaler.Sizer. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ProcessCPUSeconds, err = builder.meter.Float64ObservableCounter( "otelcol_process_cpu_seconds", - metric.WithDescription("Total CPU user and system time in seconds [alpha]"), + metric.WithDescription("Total CPU user and system time in seconds [Alpha]"), metric.WithUnit("s"), ) errs = errors.Join(errs, err) builder.ProcessMemoryRss, err = builder.meter.Int64ObservableGauge( "otelcol_process_memory_rss", - metric.WithDescription("Total physical memory (resident set size) [alpha]"), + metric.WithDescription("Total physical memory (resident set size) [Alpha]"), metric.WithUnit("By"), ) errs = errors.Join(errs, err) builder.ProcessRuntimeHeapAllocBytes, err = builder.meter.Int64ObservableGauge( "otelcol_process_runtime_heap_alloc_bytes", - metric.WithDescription("Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [alpha]"), + metric.WithDescription("Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [Alpha]"), metric.WithUnit("By"), ) errs = errors.Join(errs, err) builder.ProcessRuntimeTotalAllocBytes, err = builder.meter.Int64ObservableCounter( "otelcol_process_runtime_total_alloc_bytes", - metric.WithDescription("Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') [alpha]"), + metric.WithDescription("Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') [Alpha]"), metric.WithUnit("By"), ) errs = errors.Join(errs, err) builder.ProcessRuntimeTotalSysMemoryBytes, err = builder.meter.Int64ObservableGauge( "otelcol_process_runtime_total_sys_memory_bytes", - metric.WithDescription("Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') [alpha]"), + metric.WithDescription("Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') [Alpha]"), metric.WithUnit("By"), ) errs = errors.Join(errs, err) builder.ProcessUptime, err = builder.meter.Float64ObservableCounter( "otelcol_process_uptime", - metric.WithDescription("Uptime of the process [alpha]"), + metric.WithDescription("Uptime of the process [Alpha]"), metric.WithUnit("s"), ) errs = errors.Join(errs, err) builder.ProcessorConsumedItems, err = builder.meter.Int64Counter( "otelcol.processor.consumed.items", - metric.WithDescription("Number of items passed to the processor."), + metric.WithDescription("Number of items passed to the processor. 
[Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ProcessorConsumedSize, err = builder.meter.Int64Counter( "otelcol.processor.consumed.size", - metric.WithDescription("Size of items passed to the processor, based on ProtoMarshaler.Sizer."), + metric.WithDescription("Size of items passed to the processor, based on ProtoMarshaler.Sizer. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ProcessorProducedItems, err = builder.meter.Int64Counter( "otelcol.processor.produced.items", - metric.WithDescription("Number of items emitted from the processor."), + metric.WithDescription("Number of items emitted from the processor. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ProcessorProducedSize, err = builder.meter.Int64Counter( "otelcol.processor.produced.size", - metric.WithDescription("Size of items emitted from the processor, based on ProtoMarshaler.Sizer."), + metric.WithDescription("Size of items emitted from the processor, based on ProtoMarshaler.Sizer. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ReceiverProducedItems, err = builder.meter.Int64Counter( "otelcol.receiver.produced.items", - metric.WithDescription("Number of items emitted from the receiver."), + metric.WithDescription("Number of items emitted from the receiver. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) builder.ReceiverProducedSize, err = builder.meter.Int64Counter( "otelcol.receiver.produced.size", - metric.WithDescription("Size of items emitted from the receiver, based on ProtoMarshaler.Sizer."), + metric.WithDescription("Size of items emitted from the receiver, based on ProtoMarshaler.Sizer. [Development]"), metric.WithUnit("{item}"), ) errs = errors.Join(errs, err) diff --git a/vendor/go.opentelemetry.io/collector/service/internal/metricviews/views.go b/vendor/go.opentelemetry.io/collector/service/internal/metricviews/views.go new file mode 100644 index 00000000000..137bffd6e06 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/metricviews/views.go @@ -0,0 +1,162 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metricviews // import "go.opentelemetry.io/collector/service/internal/metricviews" + +import ( + config "go.opentelemetry.io/contrib/otelconf/v0.3.0" + + "go.opentelemetry.io/collector/config/configtelemetry" +) + +// DefaultViews builds the default metric views used by the service. +func DefaultViews(level configtelemetry.Level) []config.View { + views := []config.View{} + + if level < configtelemetry.LevelDetailed { + // Drop all otelhttp and otelgrpc metrics if the level is not detailed. 
+ views = append(views, + dropViewOption(&config.ViewSelector{ + MeterName: ptr("go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"), + }), + dropViewOption(&config.ViewSelector{ + MeterName: ptr("go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"), + }), + // Drop duration metric if the level is not detailed + dropViewOption(&config.ViewSelector{ + MeterName: ptr("go.opentelemetry.io/collector/processor/processorhelper"), + InstrumentName: ptr("otelcol_processor_internal_duration"), + }), + ) + } + + // otel-arrow library metrics + // See https://github.com/open-telemetry/otel-arrow/blob/c39257/pkg/otel/arrow_record/consumer.go#L174-L176 + if level < configtelemetry.LevelNormal { + scope := ptr("otel-arrow/pkg/otel/arrow_record") + views = append(views, + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("arrow_batch_records"), + }), + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("arrow_schema_resets"), + }), + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("arrow_memory_inuse"), + }), + ) + } + + // contrib's internal/otelarrow/netstats metrics + // See + // - https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/a25f05/internal/otelarrow/netstats/netstats.go#L130 + // - https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/a25f05/internal/otelarrow/netstats/netstats.go#L165 + if level < configtelemetry.LevelDetailed { + scope := ptr("github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats") + + views = append(views, + // Compressed size metrics. + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_*_compressed_size"), + }), + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_*_compressed_size"), + }), + + // makeRecvMetrics for exporters. + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_exporter_recv"), + }), + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_exporter_recv_wire"), + }), + + // makeSentMetrics for receivers. 
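// Every view in this file composes the same two primitives: an ordered
// configtelemetry.Level comparison and a drop view. The idiom in isolation,
// reusing this file's dropViewOption and ptr helpers (scope and instrument
// names are illustrative, not from this diff):
func exampleDetailedOnlyView(level configtelemetry.Level) []config.View {
	// Levels order None < Basic < Normal < Detailed, so "< Detailed" reads
	// as "drop this instrument unless telemetry is configured as detailed".
	if level < configtelemetry.LevelDetailed {
		return []config.View{dropViewOption(&config.ViewSelector{
			MeterName:      ptr("example.scope"),
			InstrumentName: ptr("example_instrument"),
		})}
	}
	return nil
}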
+ dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_receiver_sent"), + }), + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_receiver_sent_wire"), + }), + ) + } + + // Batch exporter metrics + if level < configtelemetry.LevelDetailed { + scope := ptr("go.opentelemetry.io/collector/exporter/exporterhelper") + views = append(views, + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_exporter_queue_batch_send_size_bytes"), + }), + dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_exporter_queue_batch_send_size"), + }), + config.View{ + Selector: &config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_exporter_send_failed_*"), + }, + Stream: &config.ViewStream{ + AttributeKeys: &config.IncludeExclude{ + Excluded: []string{"error.type", "error.permanent"}, + }, + }, + }, + ) + } + + // Batch processor metrics + scope := ptr("go.opentelemetry.io/collector/processor/batchprocessor") + if level < configtelemetry.LevelNormal { + views = append(views, dropViewOption(&config.ViewSelector{ + MeterName: scope, + })) + } else if level < configtelemetry.LevelDetailed { + views = append(views, dropViewOption(&config.ViewSelector{ + MeterName: scope, + InstrumentName: ptr("otelcol_processor_batch_batch_send_size_bytes"), + })) + } + + // Internal graph metrics + graphScope := ptr("go.opentelemetry.io/collector/service") + if level < configtelemetry.LevelDetailed { + views = append(views, + dropViewOption(&config.ViewSelector{ + MeterName: graphScope, + InstrumentName: ptr("otelcol.*.consumed.size"), + }), + dropViewOption(&config.ViewSelector{ + MeterName: graphScope, + InstrumentName: ptr("otelcol.*.produced.size"), + })) + } + + return views +} + +func dropViewOption(selector *config.ViewSelector) config.View { + return config.View{ + Selector: selector, + Stream: &config.ViewStream{ + Aggregation: &config.ViewStreamAggregation{ + Drop: config.ViewStreamAggregationDrop{}, + }, + }, + } +} + +func ptr[T any](v T) *T { + return &v +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/logs.go b/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/logs.go index c2fcdf066fe..ff58afda927 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/logs.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/logs.go @@ -6,13 +6,11 @@ package obsconsumer // import "go.opentelemetry.io/collector/service/internal/ob import ( "context" - "go.opentelemetry.io/otel/attribute" "go.uber.org/zap" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/internal/telemetry" - "go.opentelemetry.io/collector/internal/telemetry/componentattribute" "go.opentelemetry.io/collector/pdata/plog" ) @@ -34,7 +32,7 @@ func NewLogs(cons consumer.Logs, set Settings, opts ...Option) consumer.Logs { consumerSet := Settings{ ItemCounter: set.ItemCounter, SizeCounter: set.SizeCounter, - Logger: set.Logger.With(componentattribute.ToZapFields(attribute.NewSet(o.staticDataPointAttributes...))...), + Logger: set.Logger.With(telemetry.ToZapFields(o.staticDataPointAttributes)...), } return obsLogs{ diff --git a/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/metrics.go b/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/metrics.go index 7c56e89548c..bcbf4dec829 100644 --- 
a/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/metrics.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/metrics.go @@ -6,13 +6,11 @@ package obsconsumer // import "go.opentelemetry.io/collector/service/internal/ob import ( "context" - "go.opentelemetry.io/otel/attribute" "go.uber.org/zap" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/internal/telemetry" - "go.opentelemetry.io/collector/internal/telemetry/componentattribute" "go.opentelemetry.io/collector/pdata/pmetric" ) @@ -34,7 +32,7 @@ func NewMetrics(cons consumer.Metrics, set Settings, opts ...Option) consumer.Me consumerSet := Settings{ ItemCounter: set.ItemCounter, SizeCounter: set.SizeCounter, - Logger: set.Logger.With(componentattribute.ToZapFields(attribute.NewSet(o.staticDataPointAttributes...))...), + Logger: set.Logger.With(telemetry.ToZapFields(o.staticDataPointAttributes)...), } return obsMetrics{ diff --git a/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/profiles.go b/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/profiles.go index f47b464dc2b..6c1f757b491 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/profiles.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/profiles.go @@ -6,14 +6,12 @@ package obsconsumer // import "go.opentelemetry.io/collector/service/internal/ob import ( "context" - "go.opentelemetry.io/otel/attribute" "go.uber.org/zap" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/internal/telemetry" - "go.opentelemetry.io/collector/internal/telemetry/componentattribute" "go.opentelemetry.io/collector/pdata/pprofile" ) @@ -35,7 +33,7 @@ func NewProfiles(cons xconsumer.Profiles, set Settings, opts ...Option) xconsume consumerSet := Settings{ ItemCounter: set.ItemCounter, SizeCounter: set.SizeCounter, - Logger: set.Logger.With(componentattribute.ToZapFields(attribute.NewSet(o.staticDataPointAttributes...))...), + Logger: set.Logger.With(telemetry.ToZapFields(o.staticDataPointAttributes)...), } return obsProfiles{ diff --git a/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/traces.go b/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/traces.go index 0c32bcef01b..0fd89dd33fe 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/traces.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/obsconsumer/traces.go @@ -6,13 +6,11 @@ package obsconsumer // import "go.opentelemetry.io/collector/service/internal/ob import ( "context" - "go.opentelemetry.io/otel/attribute" "go.uber.org/zap" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/internal/telemetry" - "go.opentelemetry.io/collector/internal/telemetry/componentattribute" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -34,7 +32,7 @@ func NewTraces(cons consumer.Traces, set Settings, opts ...Option) consumer.Trac consumerSet := Settings{ ItemCounter: set.ItemCounter, SizeCounter: set.SizeCounter, - Logger: set.Logger.With(componentattribute.ToZapFields(attribute.NewSet(o.staticDataPointAttributes...))...), + Logger: set.Logger.With(telemetry.ToZapFields(o.staticDataPointAttributes)...), } return obsTraces{ diff --git 
a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go index e1f36ca61f9..af00d2b6234 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go @@ -70,7 +70,6 @@ func RegisterProcessMetrics(cfg component.TelemetrySettings, opts ...RegisterOpt ctx = context.WithValue(ctx, common.EnvKey, common.EnvMap{common.HostProcEnvKey: set.hostProc}) } pm.context = ctx - //nolint:gosec pm.proc, err = process.NewProcessWithContext(pm.context, int32(os.Getpid())) if err != nil { return err @@ -100,7 +99,6 @@ func (pm *processMetrics) updateAllocMem(_ context.Context, obs metric.Int64Obse pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() - //nolint:gosec obs.Observe(int64(pm.ms.Alloc)) return nil } @@ -109,7 +107,6 @@ func (pm *processMetrics) updateTotalAllocMem(_ context.Context, obs metric.Int6 pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() - //nolint:gosec obs.Observe(int64(pm.ms.TotalAlloc)) return nil } @@ -118,7 +115,6 @@ func (pm *processMetrics) updateSysMem(_ context.Context, obs metric.Int64Observ pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() - //nolint:gosec obs.Observe(int64(pm.ms.Sys)) return nil } @@ -139,7 +135,6 @@ func (pm *processMetrics) updateRSSMemory(_ context.Context, obs metric.Int64Obs if err != nil { return err } - //nolint:gosec obs.Observe(int64(mem.RSS)) return nil } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/resource/config.go b/vendor/go.opentelemetry.io/collector/service/internal/resource/config.go deleted file mode 100644 index a233bbafe29..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/internal/resource/config.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/collector/service/internal/resource" - -import ( - "github.com/google/uuid" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.18.0" - - "go.opentelemetry.io/collector/component" -) - -// New resource from telemetry configuration. -func New(buildInfo component.BuildInfo, resourceCfg map[string]*string) *resource.Resource { - var telAttrs []attribute.KeyValue - - for k, v := range resourceCfg { - // nil value indicates that the attribute should not be included in the telemetry. - if v != nil { - telAttrs = append(telAttrs, attribute.String(k, *v)) - } - } - - if _, ok := resourceCfg[string(semconv.ServiceNameKey)]; !ok { - // AttributeServiceName is not specified in the config. Use the default service name. - telAttrs = append(telAttrs, semconv.ServiceNameKey.String(buildInfo.Command)) - } - - if _, ok := resourceCfg[string(semconv.ServiceInstanceIDKey)]; !ok { - // AttributeServiceInstanceID is not specified in the config. Auto-generate one. - instanceUUID, _ := uuid.NewRandom() - instanceID := instanceUUID.String() - telAttrs = append(telAttrs, semconv.ServiceInstanceIDKey.String(instanceID)) - } - - if _, ok := resourceCfg[string(semconv.ServiceVersionKey)]; !ok { - // AttributeServiceVersion is not specified in the config. Use the actual - // build version. 
- telAttrs = append(telAttrs, semconv.ServiceVersionKey.String(buildInfo.Version)) - } - return resource.NewWithAttributes(semconv.SchemaURL, telAttrs...) -} diff --git a/vendor/go.opentelemetry.io/collector/service/metadata.yaml b/vendor/go.opentelemetry.io/collector/service/metadata.yaml index 344b7e587c3..de436afbb7a 100644 --- a/vendor/go.opentelemetry.io/collector/service/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/service/metadata.yaml @@ -1,3 +1,4 @@ +display_name: Service type: service github_project: open-telemetry/opentelemetry-collector @@ -5,7 +6,7 @@ status: disable_codecov_badge: true class: pkg stability: - development: [traces, metrics, logs] + development: [traces, metrics, logs, profiles] distributions: [core, contrib] telemetry: @@ -13,6 +14,7 @@ telemetry: connector.consumed.items: prefix: otelcol. enabled: true + stability: development description: Number of items passed to the connector. unit: "{item}" sum: @@ -22,6 +24,7 @@ telemetry: connector.consumed.size: prefix: otelcol. enabled: false + stability: development description: Size of items passed to the connector, based on ProtoMarshaler.Sizer. unit: "{item}" sum: @@ -31,6 +34,7 @@ telemetry: connector.produced.items: prefix: otelcol. enabled: true + stability: development description: Number of items emitted from the connector. unit: "{item}" sum: @@ -40,6 +44,7 @@ telemetry: connector.produced.size: prefix: otelcol. enabled: false + stability: development description: Size of items emitted from the connector, based on ProtoMarshaler.Sizer. unit: "{item}" sum: @@ -49,6 +54,7 @@ telemetry: exporter.consumed.items: prefix: otelcol. enabled: true + stability: development description: Number of items passed to the exporter. unit: "{item}" sum: @@ -58,6 +64,7 @@ telemetry: exporter.consumed.size: prefix: otelcol. enabled: false + stability: development description: Size of items passed to the exporter, based on ProtoMarshaler.Sizer. unit: "{item}" sum: @@ -66,8 +73,7 @@ telemetry: process_cpu_seconds: enabled: true - stability: - level: alpha + stability: alpha description: Total CPU user and system time in seconds unit: s sum: @@ -77,8 +83,7 @@ telemetry: process_memory_rss: enabled: true - stability: - level: alpha + stability: alpha description: Total physical memory (resident set size) unit: By gauge: @@ -87,8 +92,7 @@ telemetry: process_runtime_heap_alloc_bytes: enabled: true - stability: - level: alpha + stability: alpha description: Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') unit: By gauge: @@ -97,8 +101,7 @@ telemetry: process_runtime_total_alloc_bytes: enabled: true - stability: - level: alpha + stability: alpha description: Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') unit: By sum: @@ -108,8 +111,7 @@ telemetry: process_runtime_total_sys_memory_bytes: enabled: true - stability: - level: alpha + stability: alpha description: Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') unit: By gauge: @@ -118,8 +120,7 @@ telemetry: process_uptime: enabled: true - stability: - level: alpha + stability: alpha description: Uptime of the process unit: s sum: @@ -130,6 +131,7 @@ telemetry: processor.consumed.items: prefix: otelcol. enabled: true + stability: development description: Number of items passed to the processor. unit: "{item}" sum: @@ -139,6 +141,7 @@ telemetry: processor.consumed.size: prefix: otelcol. 
enabled: false + stability: development description: Size of items passed to the processor, based on ProtoMarshaler.Sizer. unit: "{item}" sum: @@ -148,6 +151,7 @@ telemetry: processor.produced.items: prefix: otelcol. enabled: true + stability: development description: Number of items emitted from the processor. unit: "{item}" sum: @@ -157,6 +161,7 @@ telemetry: processor.produced.size: prefix: otelcol. enabled: false + stability: development description: Size of items emitted from the processor, based on ProtoMarshaler.Sizer. unit: "{item}" sum: @@ -166,6 +171,7 @@ telemetry: receiver.produced.items: prefix: otelcol. enabled: true + stability: development description: Number of items emitted from the receiver. unit: "{item}" sum: @@ -175,8 +181,26 @@ telemetry: receiver.produced.size: prefix: otelcol. enabled: false + stability: development description: Size of items emitted from the receiver, based on ProtoMarshaler.Sizer. unit: "{item}" sum: value_type: int monotonic: true + +feature_gates: + - id: service.AllowNoPipelines + description: 'Allow starting the Collector without starting any pipelines.' + stage: alpha + from_version: 'v0.122.0' + reference_url: 'https://github.com/open-telemetry/opentelemetry-collector/pull/12613' + - id: service.profilesSupport + description: 'Controls whether profiles support can be enabled' + stage: alpha + from_version: 'v0.112.0' + reference_url: 'https://github.com/open-telemetry/opentelemetry-collector/pull/11477' + - id: telemetry.UseLocalHostAsDefaultMetricsAddress + description: 'Controls whether default Prometheus metrics server use localhost as the default host for their endpoints' + stage: beta + from_version: 'v0.111.0' + reference_url: 'https://github.com/open-telemetry/opentelemetry-collector/pull/11251' diff --git a/vendor/go.opentelemetry.io/collector/service/pipelines/config.go b/vendor/go.opentelemetry.io/collector/service/pipelines/config.go index b30afa0090b..154d848a1e2 100644 --- a/vendor/go.opentelemetry.io/collector/service/pipelines/config.go +++ b/vendor/go.opentelemetry.io/collector/service/pipelines/config.go @@ -8,9 +8,9 @@ import ( "fmt" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/service/internal/metadata" ) var ( @@ -18,19 +18,7 @@ var ( errMissingServicePipelineReceivers = errors.New("must have at least one receiver") errMissingServicePipelineExporters = errors.New("must have at least one exporter") - serviceProfileSupportGateID = "service.profilesSupport" - serviceProfileSupportGate = featuregate.GlobalRegistry().MustRegister( - serviceProfileSupportGateID, - featuregate.StageAlpha, - featuregate.WithRegisterFromVersion("v0.112.0"), - featuregate.WithRegisterDescription("Controls whether profiles support can be enabled"), - ) - AllowNoPipelines = featuregate.GlobalRegistry().MustRegister( - "service.AllowNoPipelines", - featuregate.StageAlpha, - featuregate.WithRegisterFromVersion("v0.122.0"), - featuregate.WithRegisterDescription("Allow starting the Collector without starting any pipelines."), - ) + AllowNoPipelines = metadata.ServiceAllowNoPipelinesFeatureGate ) // Config defines the configurable settings for service telemetry. @@ -38,11 +26,11 @@ type Config map[pipeline.ID]*PipelineConfig func (cfg Config) Validate() error { // Must have at least one pipeline unless explicitly disabled. 
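// The feature_gates block in metadata.yaml above is what mdatagen compiles
// into generated_feature_gates.go earlier in this diff, replacing the
// hand-written registrations deleted below. For reference, a minimal gate via
// the same public API (ID, description, and version here are illustrative):
var exampleGate = featuregate.GlobalRegistry().MustRegister(
	"service.exampleGate",  // illustrative ID
	featuregate.StageAlpha, // alpha gates default to disabled, beta to enabled
	featuregate.WithRegisterDescription("Illustrative gate, not part of this change."),
	featuregate.WithRegisterFromVersion("v0.100.0"),
)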
- if !AllowNoPipelines.IsEnabled() && len(cfg) == 0 { + if !metadata.ServiceAllowNoPipelinesFeatureGate.IsEnabled() && len(cfg) == 0 { return errMissingServicePipelines } - if !serviceProfileSupportGate.IsEnabled() { + if !metadata.ServiceProfilesSupportFeatureGate.IsEnabled() { // Check that all pipelines have at least one receiver and one exporter, and they reference // only configured components. for pipelineID := range cfg { @@ -50,7 +38,7 @@ func (cfg Config) Validate() error { return fmt.Errorf( "pipeline %q: profiling signal support is at alpha level, gated under the %q feature gate", pipelineID.String(), - serviceProfileSupportGateID, + metadata.ServiceProfilesSupportFeatureGate.ID(), ) } } diff --git a/vendor/go.opentelemetry.io/collector/service/service.go b/vendor/go.opentelemetry.io/collector/service/service.go index 14cb67c1646..0780648a407 100644 --- a/vendor/go.opentelemetry.io/collector/service/service.go +++ b/vendor/go.opentelemetry.io/collector/service/service.go @@ -11,40 +11,29 @@ import ( "fmt" "runtime" - config "go.opentelemetry.io/contrib/otelconf/v0.3.0" noopmetric "go.opentelemetry.io/otel/metric/noop" nooptrace "go.opentelemetry.io/otel/trace/noop" "go.uber.org/multierr" "go.uber.org/zap" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/service/extensions" "go.opentelemetry.io/collector/service/internal/builders" "go.opentelemetry.io/collector/service/internal/graph" + "go.opentelemetry.io/collector/service/internal/metricviews" "go.opentelemetry.io/collector/service/internal/moduleinfo" "go.opentelemetry.io/collector/service/internal/proctelemetry" "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/telemetry" ) -// This feature gate is deprecated and will be removed in 1.40.0. Views can now be configured. -var _ = featuregate.GlobalRegistry().MustRegister( - "telemetry.disableHighCardinalityMetrics", - featuregate.StageDeprecated, - featuregate.WithRegisterToVersion("0.133.0"), - featuregate.WithRegisterDescription( - "Controls whether the collector should enable potentially high "+ - "cardinality metrics. Deprecated, configure service::telemetry::metrics::views instead.")) - // ModuleInfo describes the Go module for a particular component. type ModuleInfo = moduleinfo.ModuleInfo @@ -89,8 +78,20 @@ type Settings struct { AsyncErrorChannel chan error // LoggingOptions provides a way to change behavior of zap logging. + // + // These options will be appended to any options passed to BuildZapLogger. + // + // Deprecated [v0.142.0]: use BuildZapLogger instead. This field will be + // removed in the future, and options must be injected through BuildZapLogger. LoggingOptions []zap.Option + // BuildZapLogger holds an optional function for creating a Zap logger from + // a zap.Config and options. If this is unspecified, zap.Config.Build will + // be used. + // + // NOTE: in the future this field will be required. + BuildZapLogger func(zap.Config, ...zap.Option) (*zap.Logger, error) + // TelemetryFactory is the factory for creating internal telemetry providers. 
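// A caller-side sketch of the new BuildZapLogger hook documented above; the
// specific zap option injected here is illustrative:
func exampleSettings() service.Settings {
	return service.Settings{
		BuildZapLogger: func(cfg zap.Config, opts ...zap.Option) (*zap.Logger, error) {
			// Options land here instead of in the deprecated LoggingOptions;
			// zap.Config.Build remains the default when this field is nil.
			opts = append(opts, zap.AddStacktrace(zap.ErrorLevel))
			return cfg.Build(opts...)
		},
	}
}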
TelemetryFactory telemetry.Factory } @@ -124,16 +125,37 @@ func New(ctx context.Context, set Settings, cfg Config) (_ *Service, resultErr e collectorConf: set.CollectorConf, } - // Create the logger & LoggerProvider first. These may be used - // when creating the other telemetry providers. - telemetrySettings := telemetry.Settings{BuildInfo: set.BuildInfo} - loggerSettings := telemetry.LoggerSettings{ - Settings: telemetrySettings, - ZapOptions: set.LoggingOptions, - } if set.TelemetryFactory == nil { return nil, errors.New("telemetry factory not provided") } + + // Create the resource first. This ensures all telemetry providers + // (logger, meter, tracer) use the same resource with a consistent service.instance.id. + telemetrySettings := telemetry.Settings{BuildInfo: set.BuildInfo} + resource, err := set.TelemetryFactory.CreateResource(ctx, telemetrySettings, cfg.Telemetry) + if err != nil { + return nil, fmt.Errorf("failed to create resource: %w", err) + } + telemetrySettings.Resource = &resource + + // Create a function for telemetry providers to build the Zap logger. + // This injects any LoggingOptions specified in the Settings. + buildZapLogger := set.BuildZapLogger + if buildZapLogger == nil { + buildZapLogger = zap.Config.Build + } + if len(set.LoggingOptions) > 0 { + origBuildZapLogger := buildZapLogger + buildZapLogger = func(cfg zap.Config, opts ...zap.Option) (*zap.Logger, error) { + opts = append(opts, set.LoggingOptions...) + return origBuildZapLogger(cfg, opts...) + } + } + + loggerSettings := telemetry.LoggerSettings{ + Settings: telemetrySettings, + BuildZapLogger: buildZapLogger, + } logger, loggerShutdownFunc, err := set.TelemetryFactory.CreateLogger(ctx, loggerSettings, cfg.Telemetry) if err != nil { return nil, fmt.Errorf("failed to create logger: %w", err) @@ -149,7 +171,7 @@ func New(ctx context.Context, set Settings, cfg Config) (_ *Service, resultErr e meterSettings := telemetry.MeterSettings{ Settings: telemetrySettings, Logger: logger, - DefaultViews: configureViews, + DefaultViews: metricviews.DefaultViews, } meterProvider, err := set.TelemetryFactory.CreateMeterProvider(ctx, meterSettings, cfg.Telemetry) if err != nil { @@ -177,11 +199,6 @@ func New(ctx context.Context, set Settings, cfg Config) (_ *Service, resultErr e }() srv.tracerProvider = tracerProvider - resource, err := set.TelemetryFactory.CreateResource(ctx, telemetrySettings, cfg.Telemetry) - if err != nil { - return nil, fmt.Errorf("failed to create resource: %w", err) - } - srv.telemetrySettings = component.TelemetrySettings{ Logger: logger, MeterProvider: meterProvider, @@ -206,9 +223,11 @@ func New(ctx context.Context, set Settings, cfg Config) (_ *Service, resultErr e return nil, err } - if err := proctelemetry.RegisterProcessMetrics(srv.telemetrySettings); err != nil { - return nil, fmt.Errorf("failed to register process metrics: %w", err) + // Register process metrics on supported OSes. 
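// Note the order of operations in the buildZapLogger wrapping above: the
// previous function value is captured as origBuildZapLogger before
// buildZapLogger is reassigned; closing over buildZapLogger itself would
// recurse forever. The idiom in isolation (helper name hypothetical):
func wrapBuilder(next func(zap.Config, ...zap.Option) (*zap.Logger, error), extra ...zap.Option) func(zap.Config, ...zap.Option) (*zap.Logger, error) {
	return func(cfg zap.Config, opts ...zap.Option) (*zap.Logger, error) {
		return next(cfg, append(opts, extra...)...)
	}
}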
+ if err := registerProcessMetrics(srv, runtime.GOOS, proctelemetry.RegisterProcessMetrics); err != nil { + return nil, err } + return srv, nil } @@ -325,140 +344,6 @@ func (srv *Service) Logger() *zap.Logger { return srv.telemetrySettings.Logger } -func dropViewOption(selector *config.ViewSelector) config.View { - return config.View{ - Selector: selector, - Stream: &config.ViewStream{ - Aggregation: &config.ViewStreamAggregation{ - Drop: config.ViewStreamAggregationDrop{}, - }, - }, - } -} - -func configureViews(level configtelemetry.Level) []config.View { - views := []config.View{} - - if level < configtelemetry.LevelDetailed { - // Drop all otelhttp and otelgrpc metrics if the level is not detailed. - views = append(views, - dropViewOption(&config.ViewSelector{ - MeterName: ptr("go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"), - }), - dropViewOption(&config.ViewSelector{ - MeterName: ptr("go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"), - }), - // Drop duration metric if the level is not detailed - dropViewOption(&config.ViewSelector{ - MeterName: ptr("go.opentelemetry.io/collector/processor/processorhelper"), - InstrumentName: ptr("otelcol_processor_internal_duration"), - }), - ) - } - - // otel-arrow library metrics - // See https://github.com/open-telemetry/otel-arrow/blob/c39257/pkg/otel/arrow_record/consumer.go#L174-L176 - if level < configtelemetry.LevelNormal { - scope := ptr("otel-arrow/pkg/otel/arrow_record") - views = append(views, - dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("arrow_batch_records"), - }), - dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("arrow_schema_resets"), - }), - dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("arrow_memory_inuse"), - }), - ) - } - - // contrib's internal/otelarrow/netstats metrics - // See - // - https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/a25f05/internal/otelarrow/netstats/netstats.go#L130 - // - https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/a25f05/internal/otelarrow/netstats/netstats.go#L165 - if level < configtelemetry.LevelDetailed { - scope := ptr("github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats") - - views = append(views, - // Compressed size metrics. - dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("otelcol_*_compressed_size"), - }), - dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("otelcol_*_compressed_size"), - }), - - // makeRecvMetrics for exporters. - dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("otelcol_exporter_recv"), - }), - dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("otelcol_exporter_recv_wire"), - }), - - // makeSentMetrics for receivers. 
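// The configureViews deletion here is mostly code motion into the new
// service/internal/metricviews package added earlier in this diff, but not
// purely: comparing the two, DefaultViews additionally drops
// otelcol_exporter_queue_batch_send_size and excludes the error.type and
// error.permanent attributes from otelcol_exporter_send_failed_* below the
// detailed level.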
- dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("otelcol_receiver_sent"), - }), - dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("otelcol_receiver_sent_wire"), - }), - ) - } - - // Batch exporter metrics - if level < configtelemetry.LevelDetailed { - scope := ptr("go.opentelemetry.io/collector/exporter/exporterhelper") - views = append(views, dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("otelcol_exporter_queue_batch_send_size_bytes"), - })) - } - - // Batch processor metrics - scope := ptr("go.opentelemetry.io/collector/processor/batchprocessor") - if level < configtelemetry.LevelNormal { - views = append(views, dropViewOption(&config.ViewSelector{ - MeterName: scope, - })) - } else if level < configtelemetry.LevelDetailed { - views = append(views, dropViewOption(&config.ViewSelector{ - MeterName: scope, - InstrumentName: ptr("otelcol_processor_batch_batch_send_size_bytes"), - })) - } - - // Internal graph metrics - graphScope := ptr("go.opentelemetry.io/collector/service") - if level < configtelemetry.LevelDetailed { - views = append(views, - dropViewOption(&config.ViewSelector{ - MeterName: graphScope, - InstrumentName: ptr("otelcol.*.consumed.size"), - }), - dropViewOption(&config.ViewSelector{ - MeterName: graphScope, - InstrumentName: ptr("otelcol.*.produced.size"), - })) - } - - return views -} - -func ptr[T any](v T) *T { - return &v -} - // Validate verifies the graph by calling the internal graph.Build. func Validate(ctx context.Context, set Settings, cfg Config) error { tel := component.TelemetrySettings{ @@ -481,3 +366,30 @@ func Validate(ctx context.Context, set Settings, cfg Config) error { } return nil } + +// registerProcessMetrics registers process metrics on supported operating systems. +// +// Historically, attempting to register process metrics on unsupported platforms +// (e.g. AIX) caused the Collector to fail at startup. +// See https://github.com/open-telemetry/opentelemetry-collector/issues/12098 +func registerProcessMetrics( + srv *Service, + goos string, + register func(component.TelemetrySettings, ...proctelemetry.RegisterOption) error, +) error { + switch goos { + // Only support the OSes that we explicitly test and build for, + // plus others that are known to have some support in gopsutil. + case "linux", "darwin", "windows", "freebsd", "openbsd", "solaris", "plan9": + if err := register(srv.telemetrySettings); err != nil { + return fmt.Errorf("failed to register process metrics: %w", err) + } + default: + // On unsupported OSes, log a warning and continue startup. 
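// Passing runtime.GOOS and the register function in as parameters makes the
// gate below unit-testable without access to an exotic OS. A sketch of such
// a test (assumed, not part of this diff; uses testify and a no-op zap logger):
func TestRegisterProcessMetricsSkipsUnsupportedOS(t *testing.T) {
	srv := &Service{telemetrySettings: component.TelemetrySettings{Logger: zap.NewNop()}}
	called := false
	err := registerProcessMetrics(srv, "aix", func(component.TelemetrySettings, ...proctelemetry.RegisterOption) error {
		called = true
		return nil
	})
	require.NoError(t, err)
	require.False(t, called, "register must not run on an unsupported OS")
}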
+ srv.telemetrySettings.Logger.Warn( + "Process metrics are disabled on this operating system", + zap.String("os", goos), + ) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/normalize.go b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/normalize.go deleted file mode 100644 index 1f7ff8dfa8d..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/normalize.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package migration // import "go.opentelemetry.io/collector/service/telemetry/internal/migration" - -import "strings" - -func normalizeEndpoint(endpoint string, insecure *bool) *string { - if !strings.HasPrefix(endpoint, "https://") && !strings.HasPrefix(endpoint, "http://") { - if insecure != nil && *insecure { - endpoint = "http://" + endpoint - } else { - endpoint = "https://" + endpoint - } - } - return &endpoint -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/v0.2.0.go b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/v0.2.0.go deleted file mode 100644 index 094b37a357b..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/v0.2.0.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package migration // import "go.opentelemetry.io/collector/service/telemetry/internal/migration" - -import ( - configv02 "go.opentelemetry.io/contrib/otelconf/v0.2.0" - config "go.opentelemetry.io/contrib/otelconf/v0.3.0" - "go.uber.org/zap/zapcore" - - "go.opentelemetry.io/collector/config/configtelemetry" -) - -type tracesConfigV020 struct { - Level configtelemetry.Level `mapstructure:"level"` - Propagators []string `mapstructure:"propagators"` - Processors []configv02.SpanProcessor `mapstructure:"processors"` -} - -type metricsConfigV020 struct { - Level configtelemetry.Level `mapstructure:"level"` - Readers []configv02.MetricReader `mapstructure:"readers"` -} - -type logsConfigV020 struct { - Level zapcore.Level `mapstructure:"level"` - Development bool `mapstructure:"development"` - Encoding string `mapstructure:"encoding"` - DisableCaller bool `mapstructure:"disable_caller"` - DisableStacktrace bool `mapstructure:"disable_stacktrace"` - Sampling *LogsSamplingConfig `mapstructure:"sampling"` - OutputPaths []string `mapstructure:"output_paths"` - ErrorOutputPaths []string `mapstructure:"error_output_paths"` - InitialFields map[string]any `mapstructure:"initial_fields"` - Processors []configv02.LogRecordProcessor `mapstructure:"processors"` -} - -func headersV02ToV03(in configv02.Headers) []config.NameStringValuePair { - headers := make([]config.NameStringValuePair, 0, len(in)) - for k, v := range in { - headers = append(headers, config.NameStringValuePair{ - Name: k, - Value: &v, - }) - } - return headers -} - -func otlpV02ToV03(in *configv02.OTLP) *config.OTLP { - if in == nil { - return nil - } - return &config.OTLP{ - Certificate: in.Certificate, - ClientCertificate: in.ClientCertificate, - ClientKey: in.ClientKey, - Compression: in.Compression, - Endpoint: normalizeEndpoint(in.Endpoint, in.Insecure), - Insecure: in.Insecure, - Protocol: &in.Protocol, - Timeout: in.Timeout, - Headers: headersV02ToV03(in.Headers), - } -} - -func otlpMetricV02ToV03(in *configv02.OTLPMetric) *config.OTLPMetric { - if in == nil { - return nil - } - return 
&config.OTLPMetric{ - Certificate: in.Certificate, - ClientCertificate: in.ClientCertificate, - ClientKey: in.ClientKey, - Compression: in.Compression, - Endpoint: normalizeEndpoint(in.Endpoint, in.Insecure), - Insecure: in.Insecure, - Protocol: &in.Protocol, - Timeout: in.Timeout, - Headers: headersV02ToV03(in.Headers), - } -} - -func zipkinV02ToV03(in *configv02.Zipkin) *config.Zipkin { - if in == nil { - return nil - } - return &config.Zipkin{ - Endpoint: &in.Endpoint, - Timeout: in.Timeout, - } -} - -func tracesConfigV02ToV03(v2 tracesConfigV020, v3 *TracesConfigV030) error { - processors := make([]config.SpanProcessor, len(v2.Processors)) - for idx, p := range v2.Processors { - processors[idx] = config.SpanProcessor{} - if p.Batch != nil { - processors[idx].Batch = &config.BatchSpanProcessor{ - ExportTimeout: p.Batch.ExportTimeout, - MaxExportBatchSize: p.Batch.MaxExportBatchSize, - MaxQueueSize: p.Batch.MaxQueueSize, - ScheduleDelay: p.Batch.ScheduleDelay, - Exporter: config.SpanExporter{ - Console: config.Console(p.Batch.Exporter.Console), - OTLP: otlpV02ToV03(p.Batch.Exporter.OTLP), - Zipkin: zipkinV02ToV03(p.Batch.Exporter.Zipkin), - AdditionalProperties: p.Batch.Exporter.AdditionalProperties, - }, - } - } - if p.Simple != nil { - processors[idx].Simple = &config.SimpleSpanProcessor{ - Exporter: config.SpanExporter{ - Console: config.Console(p.Simple.Exporter.Console), - OTLP: otlpV02ToV03(p.Simple.Exporter.OTLP), - Zipkin: zipkinV02ToV03(p.Simple.Exporter.Zipkin), - AdditionalProperties: p.Simple.Exporter.AdditionalProperties, - }, - } - } - processors[idx].AdditionalProperties = p.AdditionalProperties - } - v3.Level = v2.Level - v3.Propagators = v2.Propagators - v3.Processors = processors - return nil -} - -func prometheusV02ToV03(in *configv02.Prometheus) *config.Prometheus { - if in == nil { - return nil - } - return &config.Prometheus{ - Host: in.Host, - Port: in.Port, - WithoutScopeInfo: in.WithoutScopeInfo, - WithoutUnits: in.WithoutUnits, - WithoutTypeSuffix: in.WithoutTypeSuffix, - WithResourceConstantLabels: (*config.IncludeExclude)(in.WithResourceConstantLabels), - } -} - -func metricsConfigV02ToV03(v2 metricsConfigV020, v3 *MetricsConfigV030) error { - readers := make([]config.MetricReader, len(v2.Readers)) - for idx, p := range v2.Readers { - readers[idx] = config.MetricReader{} - if p.Periodic != nil { - readers[idx].Periodic = &config.PeriodicMetricReader{ - Interval: p.Periodic.Interval, - Timeout: p.Periodic.Timeout, - Exporter: config.PushMetricExporter{ - Console: config.Console(p.Periodic.Exporter.Console), - OTLP: otlpMetricV02ToV03(p.Periodic.Exporter.OTLP), - AdditionalProperties: p.Periodic.Exporter.AdditionalProperties, - }, - } - } - if p.Pull != nil { - readers[idx].Pull = &config.PullMetricReader{ - Exporter: config.PullMetricExporter{ - Prometheus: prometheusV02ToV03(p.Pull.Exporter.Prometheus), - AdditionalProperties: p.Pull.Exporter.AdditionalProperties, - }, - } - } - } - v3.Level = v2.Level - v3.Readers = readers - return nil -} - -func logsConfigV02ToV03(v2 logsConfigV020, v3 *LogsConfigV030) error { - processors := make([]config.LogRecordProcessor, len(v2.Processors)) - for idx, p := range v2.Processors { - processors[idx] = config.LogRecordProcessor{} - if p.Batch != nil { - processors[idx].Batch = &config.BatchLogRecordProcessor{ - ExportTimeout: p.Batch.ExportTimeout, - MaxExportBatchSize: p.Batch.MaxExportBatchSize, - MaxQueueSize: p.Batch.MaxQueueSize, - ScheduleDelay: p.Batch.ScheduleDelay, - Exporter: config.LogRecordExporter{ - Console: 
config.Console(p.Batch.Exporter.Console), - OTLP: otlpV02ToV03(p.Batch.Exporter.OTLP), - AdditionalProperties: p.Batch.Exporter.AdditionalProperties, - }, - } - } - if p.Simple != nil { - processors[idx].Simple = &config.SimpleLogRecordProcessor{ - Exporter: config.LogRecordExporter{ - Console: config.Console(p.Simple.Exporter.Console), - OTLP: otlpV02ToV03(p.Simple.Exporter.OTLP), - AdditionalProperties: p.Simple.Exporter.AdditionalProperties, - }, - } - } - processors[idx].AdditionalProperties = p.AdditionalProperties - } - v3.Level = v2.Level - v3.Development = v2.Development - v3.Encoding = v2.Encoding - v3.DisableCaller = v2.DisableCaller - v3.DisableStacktrace = v2.DisableStacktrace - v3.Sampling = v2.Sampling - v3.OutputPaths = v2.OutputPaths - v3.ErrorOutputPaths = v2.ErrorOutputPaths - v3.InitialFields = v2.InitialFields - v3.Processors = processors - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/v0.3.0.go b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/v0.3.0.go deleted file mode 100644 index 6a1415cc753..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/migration/v0.3.0.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package migration // import "go.opentelemetry.io/collector/service/telemetry/internal/migration" - -import ( - "time" - - config "go.opentelemetry.io/contrib/otelconf/v0.3.0" - "go.uber.org/zap/zapcore" - - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/confmap" -) - -type TracesConfigV030 struct { - // Level configures whether spans are emitted or not, the possible values are: - // - "none" indicates that no tracing data should be collected; - // - "basic" is the recommended and covers the basics of the service telemetry. - Level configtelemetry.Level `mapstructure:"level"` - // Propagators is a list of TextMapPropagators from the supported propagators list. Currently, - // tracecontext and b3 are supported. By default, the value is set to empty list and - // context propagation is disabled. 
- Propagators []string `mapstructure:"propagators"` - - config.TracerProvider `mapstructure:",squash"` -} - -func (c *TracesConfigV030) Unmarshal(conf *confmap.Conf) error { - unmarshaled := *c - if err := conf.Unmarshal(c); err != nil { - v2TracesConfig := tracesConfigV020{ - Level: unmarshaled.Level, - Propagators: unmarshaled.Propagators, - } - // try unmarshaling using v0.2.0 struct - if e := conf.Unmarshal(&v2TracesConfig); e != nil { - // error could not be resolved through backwards - // compatibility attempts - return err - } - // TODO: add a warning log to tell users to migrate their config - return tracesConfigV02ToV03(v2TracesConfig, c) - } - // ensure endpoint normalization occurs - for _, r := range c.Processors { - if r.Batch != nil { - if r.Batch.Exporter.OTLP != nil { - r.Batch.Exporter.OTLP.Endpoint = normalizeEndpoint(*r.Batch.Exporter.OTLP.Endpoint, r.Batch.Exporter.OTLP.Insecure) - } - } - if r.Simple != nil { - if r.Simple.Exporter.OTLP != nil && r.Simple.Exporter.OTLP.Endpoint != nil { - r.Simple.Exporter.OTLP.Endpoint = normalizeEndpoint(*r.Simple.Exporter.OTLP.Endpoint, r.Simple.Exporter.OTLP.Insecure) - } - } - } - return nil -} - -type MetricsConfigV030 struct { - // Level is the level of telemetry metrics, the possible values are: - // - "none" indicates that no telemetry data should be collected; - // - "basic" is the recommended and covers the basics of the service telemetry. - // - "normal" adds some other indicators on top of basic. - // - "detailed" adds dimensions and views to the previous levels. - Level configtelemetry.Level `mapstructure:"level"` - - config.MeterProvider `mapstructure:",squash"` -} - -func (c *MetricsConfigV030) Unmarshal(conf *confmap.Conf) error { - unmarshaled := *c - if err := conf.Unmarshal(c); err != nil { - v02 := metricsConfigV020{ - Level: unmarshaled.Level, - } - // try unmarshaling using v0.2.0 struct - if e := conf.Unmarshal(&v02); e != nil { - // error could not be resolved through backwards - // compatibility attempts - return err - } - // TODO: add a warning log to tell users to migrate their config - return metricsConfigV02ToV03(v02, c) - } - // ensure endpoint normalization occurs - for _, r := range c.Readers { - if r.Periodic != nil { - if r.Periodic.Exporter.OTLP != nil && r.Periodic.Exporter.OTLP.Endpoint != nil { - r.Periodic.Exporter.OTLP.Endpoint = normalizeEndpoint(*r.Periodic.Exporter.OTLP.Endpoint, r.Periodic.Exporter.OTLP.Insecure) - } - } - } - return nil -} - -type LogsConfigV030 struct { - // Level is the minimum enabled logging level. - // (default = "INFO") - Level zapcore.Level `mapstructure:"level"` - - // Development puts the logger in development mode, which changes the - // behavior of DPanicLevel and takes stacktraces more liberally. - // (default = false) - Development bool `mapstructure:"development,omitempty"` - - // Encoding sets the logger's encoding. - // Example values are "json", "console". - Encoding string `mapstructure:"encoding"` - - // DisableCaller stops annotating logs with the calling function's file - // name and line number. By default, all logs are annotated. - // (default = false) - DisableCaller bool `mapstructure:"disable_caller,omitempty"` - - // DisableStacktrace completely disables automatic stacktrace capturing. By - // default, stacktraces are captured for WarnLevel and above logs in - // development and ErrorLevel and above in production. - // (default = false) - DisableStacktrace bool `mapstructure:"disable_stacktrace,omitempty"` - - // Sampling sets a sampling policy. 
- // Default: - // sampling: - // enabled: true - // tick: 10s - // initial: 10 - // thereafter: 100 - // Sampling can be disabled by setting 'enabled' to false - Sampling *LogsSamplingConfig `mapstructure:"sampling"` - - // OutputPaths is a list of URLs or file paths to write logging output to. - // The URLs could only be with "file" schema or without schema. - // The URLs with "file" schema must be an absolute path. - // The URLs without schema are treated as local file paths. - // "stdout" and "stderr" are interpreted as os.Stdout and os.Stderr. - // see details at Open in zap/writer.go. - // (default = ["stderr"]) - OutputPaths []string `mapstructure:"output_paths"` - - // ErrorOutputPaths is a list of URLs or file paths to write zap internal logger errors to. - // The URLs could only be with "file" schema or without schema. - // The URLs with "file" schema must use absolute paths. - // The URLs without schema are treated as local file paths. - // "stdout" and "stderr" are interpreted as os.Stdout and os.Stderr. - // see details at Open in zap/writer.go. - // - // Note that this setting only affects the zap internal logger errors. - // (default = ["stderr"]) - ErrorOutputPaths []string `mapstructure:"error_output_paths"` - - // InitialFields is a collection of fields to add to the root logger. - // Example: - // - // initial_fields: - // foo: "bar" - // - // By default, there is no initial field. - InitialFields map[string]any `mapstructure:"initial_fields,omitempty"` - - // Processors allow configuration of log record processors to emit logs to - // any number of supported backends. - Processors []config.LogRecordProcessor `mapstructure:"processors,omitempty"` -} - -// LogsSamplingConfig sets a sampling strategy for the logger. Sampling caps the -// global CPU and I/O load that logging puts on your process while attempting -// to preserve a representative subset of your logs. -type LogsSamplingConfig struct { - // Enabled enable sampling logging - Enabled bool `mapstructure:"enabled"` - // Tick represents the interval in seconds that the logger apply each sampling. - Tick time.Duration `mapstructure:"tick"` - // Initial represents the first M messages logged each Tick. - Initial int `mapstructure:"initial"` - // Thereafter represents the sampling rate, every Nth message will be sampled after Initial messages are logged during each Tick. - // If Thereafter is zero, the logger will drop all the messages after the Initial each Tick. 
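
// Aside (not part of the vendored code): these sampling fields feed straight
// into zap's sampler, so within each Tick the first Initial entries for a
// given level and message are logged, and only every Thereafter-th entry is
// kept after that. The wiring, as it appears in logger.go later in this diff:
//
//	logger = logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
//		return zapcore.NewSamplerWithOptions(
//			core,
//			cfg.Logs.Sampling.Tick,
//			cfg.Logs.Sampling.Initial,
//			cfg.Logs.Sampling.Thereafter,
//		)
//	}))
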
- Thereafter int `mapstructure:"thereafter"` -} - -func (c *LogsConfigV030) Unmarshal(conf *confmap.Conf) error { - unmarshaled := *c - if err := conf.Unmarshal(c); err != nil { - v02 := logsConfigV020{ - Level: unmarshaled.Level, - Development: unmarshaled.Development, - Encoding: unmarshaled.Encoding, - DisableCaller: unmarshaled.DisableCaller, - DisableStacktrace: unmarshaled.DisableStacktrace, - Sampling: unmarshaled.Sampling, - OutputPaths: unmarshaled.OutputPaths, - ErrorOutputPaths: unmarshaled.ErrorOutputPaths, - InitialFields: unmarshaled.InitialFields, - } - // try unmarshaling using v0.2.0 struct - if e := conf.Unmarshal(&v02); e != nil { - // error could not be resolved through backwards - // compatibility attempts - return err - } - // TODO: add a warning log to tell users to migrate their config - return logsConfigV02ToV03(v02, c) - } - // ensure endpoint normalization occurs - for _, r := range c.Processors { - if r.Batch != nil { - if r.Batch.Exporter.OTLP != nil { - r.Batch.Exporter.OTLP.Endpoint = normalizeEndpoint(*r.Batch.Exporter.OTLP.Endpoint, r.Batch.Exporter.OTLP.Insecure) - } - } - if r.Simple != nil { - if r.Simple.Exporter.OTLP != nil && r.Simple.Exporter.OTLP.Endpoint != nil { - r.Simple.Exporter.OTLP.Endpoint = normalizeEndpoint(*r.Simple.Exporter.OTLP.Endpoint, r.Simple.Exporter.OTLP.Insecure) - } - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/config.go b/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/config.go deleted file mode 100644 index 0229e7c740c..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/config.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconftelemetry // import "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" - -import ( - "errors" - - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/service/telemetry/internal/migration" -) - -// Config defines the configurable settings for service telemetry. -type Config struct { - Logs LogsConfig `mapstructure:"logs"` - Metrics MetricsConfig `mapstructure:"metrics"` - Traces TracesConfig `mapstructure:"traces,omitempty"` - - // Resource specifies user-defined attributes to include with all emitted telemetry. - // Note that some attributes are added automatically (e.g. service.version) even - // if they are not specified here. In order to suppress such attributes the - // attribute must be specified in this map with null YAML value (nil string pointer). - Resource map[string]*string `mapstructure:"resource,omitempty"` -} - -// LogsConfig defines the configurable settings for service telemetry logs. -// This MUST be compatible with zap.Config. Cannot use directly zap.Config because -// the collector uses mapstructure and not yaml tags. -type LogsConfig = migration.LogsConfigV030 - -// LogsSamplingConfig sets a sampling strategy for the logger. Sampling caps the -// global CPU and I/O load that logging puts on your process while attempting -// to preserve a representative subset of your logs. -type LogsSamplingConfig = migration.LogsSamplingConfig - -// MetricsConfig exposes the common Telemetry configuration for one component. -// Experimental: *NOTE* this structure is subject to change or removal in the future. 
-type MetricsConfig = migration.MetricsConfigV030 - -// TracesConfig exposes the common Telemetry configuration for collector's internal spans. -// Experimental: *NOTE* this structure is subject to change or removal in the future. -type TracesConfig = migration.TracesConfigV030 - -// Validate checks whether the current configuration is valid -func (c *Config) Validate() error { - // Check when service telemetry metric level is not none, the metrics readers should not be empty - if c.Metrics.Level != configtelemetry.LevelNone && len(c.Metrics.Readers) == 0 { - return errors.New("collector telemetry metrics reader should exist when metric level is not none") - } - - if c.Metrics.Views != nil && c.Metrics.Level != configtelemetry.LevelDetailed { - return errors.New("service::telemetry::metrics::views can only be set when service::telemetry::metrics::level is detailed") - } - - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/factory.go b/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/factory.go deleted file mode 100644 index bffadf6be9d..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/factory.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconftelemetry // import "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" - -import ( - "time" - - config "go.opentelemetry.io/contrib/otelconf/v0.3.0" - "go.uber.org/zap/zapcore" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/featuregate" - "go.opentelemetry.io/collector/service/telemetry" -) - -var useLocalHostAsDefaultMetricsAddressFeatureGate = featuregate.GlobalRegistry().MustRegister( - "telemetry.UseLocalHostAsDefaultMetricsAddress", - featuregate.StageBeta, - featuregate.WithRegisterFromVersion("v0.111.0"), - featuregate.WithRegisterDescription("controls whether default Prometheus metrics server use localhost as the default host for their endpoints"), -) - -// NewFactory creates a new telemetry.Factory that uses otelconf -// to configure opentelemetry-go SDK telemetry providers. 
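
// Aside (not part of the vendored code): the Beta feature gate registered
// above is enabled by default, so the default Prometheus reader binds to
// localhost:8888; disabling the gate restores the previous behaviour of
// binding to all interfaces (empty host). Assuming the standard otelcol
// feature-gate flag syntax:
//
//	otelcol --config=config.yaml \
//	    --feature-gates=-telemetry.UseLocalHostAsDefaultMetricsAddress
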
-func NewFactory() telemetry.Factory { - return telemetry.NewFactory( - createDefaultConfig, - telemetry.WithCreateResource(createResource), - telemetry.WithCreateLogger(createLogger), - telemetry.WithCreateMeterProvider(createMeterProvider), - telemetry.WithCreateTracerProvider(createTracerProvider), - ) -} - -func createDefaultConfig() component.Config { - metricsHost := "localhost" - if !useLocalHostAsDefaultMetricsAddressFeatureGate.IsEnabled() { - metricsHost = "" - } - - return &Config{ - Logs: LogsConfig{ - Level: zapcore.InfoLevel, - Development: false, - Encoding: "console", - Sampling: &LogsSamplingConfig{ - Enabled: true, - Tick: 10 * time.Second, - Initial: 10, - Thereafter: 100, - }, - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - DisableCaller: false, - DisableStacktrace: false, - InitialFields: map[string]any(nil), - }, - Metrics: MetricsConfig{ - Level: configtelemetry.LevelNormal, - MeterProvider: config.MeterProvider{ - Readers: []config.MetricReader{ - { - Pull: &config.PullMetricReader{Exporter: config.PullMetricExporter{Prometheus: &config.Prometheus{ - WithoutScopeInfo: ptr(true), - WithoutUnits: ptr(true), - WithoutTypeSuffix: ptr(true), - Host: &metricsHost, - Port: ptr(8888), - WithResourceConstantLabels: &config.IncludeExclude{ - Included: []string{}, - }, - }}}, - }, - }, - }, - }, - } -} - -func ptr[T any](v T) *T { - return &v -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/logger.go b/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/logger.go deleted file mode 100644 index 8af9830239a..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/logger.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconftelemetry // import "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" - -import ( - "context" - - otelconf "go.opentelemetry.io/contrib/otelconf/v0.3.0" - "go.opentelemetry.io/otel/attribute" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/internal/telemetry/componentattribute" - "go.opentelemetry.io/collector/service/telemetry" -) - -// createLogger creates a Logger and a LoggerProvider from Config. -func createLogger( - ctx context.Context, - set telemetry.LoggerSettings, - componentConfig component.Config, -) (*zap.Logger, component.ShutdownFunc, error) { - cfg := componentConfig.(*Config) - res := newResource(set.Settings, cfg) - - // Copied from NewProductionConfig. - ec := zap.NewProductionEncoderConfig() - ec.EncodeTime = zapcore.ISO8601TimeEncoder - zapCfg := &zap.Config{ - Level: zap.NewAtomicLevelAt(cfg.Logs.Level), - Development: cfg.Logs.Development, - Encoding: cfg.Logs.Encoding, - EncoderConfig: ec, - OutputPaths: cfg.Logs.OutputPaths, - ErrorOutputPaths: cfg.Logs.ErrorOutputPaths, - DisableCaller: cfg.Logs.DisableCaller, - DisableStacktrace: cfg.Logs.DisableStacktrace, - InitialFields: cfg.Logs.InitialFields, - } - - if zapCfg.Encoding == "console" { - // Human-readable timestamps for console format of logs. - zapCfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - } - - logger, err := zapCfg.Build(set.ZapOptions...) - if err != nil { - return nil, nil, err - } - - // The attributes in res.Attributes(), which are generated in telemetry.go, - // are added to logs exported through the LoggerProvider instantiated below. 
- // To make sure they are also exposed in logs written to stdout, we add - // them as fields to the Zap core created above using WrapCore. We do NOT - // add them to the logger using With, because that would apply to all logs, - // even ones exported through the core that wraps the LoggerProvider, - // meaning that the attributes would be exported twice. - if res != nil && len(res.Attributes()) > 0 { - logger = logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core { - var fields []zap.Field - for _, attr := range res.Attributes() { - fields = append(fields, zap.String(string(attr.Key), attr.Value.Emit())) - } - - r := zap.Dict("resource", fields...) - return c.With([]zapcore.Field{r}) - })) - } - - if cfg.Logs.Sampling != nil && cfg.Logs.Sampling.Enabled { - logger = logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { - return zapcore.NewSamplerWithOptions( - core, - cfg.Logs.Sampling.Tick, - cfg.Logs.Sampling.Initial, - cfg.Logs.Sampling.Thereafter, - ) - })) - } - - sdk, err := newSDK(ctx, res, otelconf.OpenTelemetryConfiguration{ - LoggerProvider: &otelconf.LoggerProvider{ - Processors: cfg.Logs.Processors, - }, - }) - if err != nil { - return nil, nil, err - } - - // Wrap the zap.Logger with componentattribute so scope attributes - // can be added and removed dynamically, and tee logs to the - // LoggerProvider. - loggerProvider := sdk.LoggerProvider() - logger = logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { - core = componentattribute.NewConsoleCoreWithAttributes(core, attribute.NewSet()) - core = componentattribute.NewOTelTeeCoreWithAttributes( - core, - loggerProvider, - "go.opentelemetry.io/collector/service", - attribute.NewSet(), - ) - return core - })) - - return logger, sdk.Shutdown, nil -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/metrics.go b/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/metrics.go deleted file mode 100644 index 37e76a67690..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/metrics.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconftelemetry // import "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" - -import ( - "context" - - config "go.opentelemetry.io/contrib/otelconf/v0.3.0" - noopmetric "go.opentelemetry.io/otel/metric/noop" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/service/telemetry" -) - -func createMeterProvider( - ctx context.Context, - set telemetry.MeterSettings, - componentConfig component.Config, -) (telemetry.MeterProvider, error) { - cfg := componentConfig.(*Config) - if cfg.Metrics.Level == configtelemetry.LevelNone { - set.Logger.Info("Internal metrics telemetry disabled") - return noopMeterProvider{MeterProvider: noopmetric.NewMeterProvider()}, nil - } else if cfg.Metrics.Views == nil && set.DefaultViews != nil { - cfg.Metrics.Views = set.DefaultViews(cfg.Metrics.Level) - } - - res := newResource(set.Settings, cfg) - mpConfig := cfg.Metrics.MeterProvider - sdk, err := newSDK(ctx, res, config.OpenTelemetryConfiguration{ - MeterProvider: &mpConfig, - }) - if err != nil { - return nil, err - } - return sdk.MeterProvider().(telemetry.MeterProvider), nil -} - -type noopMeterProvider struct { - noopmetric.MeterProvider -} - -func (noopMeterProvider) Shutdown(context.Context) error { - 
return nil -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/resource.go b/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/resource.go deleted file mode 100644 index f419232baaf..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/resource.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconftelemetry // import "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" - -import ( - "context" - "fmt" - - "go.opentelemetry.io/otel/attribute" - sdkresource "go.opentelemetry.io/otel/sdk/resource" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/service/internal/resource" - "go.opentelemetry.io/collector/service/telemetry" -) - -func createResource( - _ context.Context, - set telemetry.Settings, - componentConfig component.Config, -) (pcommon.Resource, error) { - res := newResource(set, componentConfig.(*Config)) - pcommonRes := pcommon.NewResource() - for _, keyValue := range res.Attributes() { - key := string(keyValue.Key) - pcommonRes.Attributes().PutStr(key, mustAttributeValueString(key, keyValue.Value)) - } - return pcommonRes, nil -} - -func newResource(set telemetry.Settings, cfg *Config) *sdkresource.Resource { - return resource.New(set.BuildInfo, cfg.Resource) -} - -func mustAttributeValueString(k string, v attribute.Value) string { - if v.Type() != attribute.STRING { - // We only support string-type resource attributes in the configuration. - panic(fmt.Errorf("attribute %q: expected string, got %s", k, v.Type())) - } - return v.AsString() -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/sdk.go b/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/sdk.go deleted file mode 100644 index 7b4456ab311..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/sdk.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconftelemetry // import "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" - -import ( - "context" - - config "go.opentelemetry.io/contrib/otelconf/v0.3.0" - sdkresource "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" -) - -func newSDK(ctx context.Context, res *sdkresource.Resource, conf config.OpenTelemetryConfiguration) (config.SDK, error) { - resourceAttrs := make([]config.AttributeNameValue, 0, res.Len()) - for _, r := range res.Attributes() { - key := string(r.Key) - resourceAttrs = append(resourceAttrs, config.AttributeNameValue{ - Name: key, - Value: mustAttributeValueString(key, r.Value), - }) - } - conf.Resource = &config.Resource{ - SchemaUrl: ptr(semconv.SchemaURL), - Attributes: resourceAttrs, - } - return config.NewSDK(config.WithContext(ctx), config.WithOpenTelemetryConfiguration(conf)) -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/tracer.go b/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/tracer.go deleted file mode 100644 index 85972af4592..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/otelconftelemetry/tracer.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconftelemetry // import 
"go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" - -import ( - "context" - "errors" - "fmt" - - config "go.opentelemetry.io/contrib/otelconf/v0.3.0" - "go.opentelemetry.io/contrib/propagators/b3" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/embedded" - "go.opentelemetry.io/otel/trace/noop" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/featuregate" - "go.opentelemetry.io/collector/service/telemetry" -) - -var _ = featuregate.GlobalRegistry().MustRegister("service.noopTracerProvider", - featuregate.StageDeprecated, - featuregate.WithRegisterFromVersion("v0.107.0"), - featuregate.WithRegisterToVersion("v0.109.0"), - featuregate.WithRegisterDescription("Sets a Noop OpenTelemetry TracerProvider to reduce memory allocations. This featuregate is incompatible with the zPages extension.")) - -const ( - // supported trace propagators - traceContextPropagator = "tracecontext" - b3Propagator = "b3" -) - -func createTracerProvider( - ctx context.Context, - set telemetry.TracerSettings, - componentConfig component.Config, -) (telemetry.TracerProvider, error) { - cfg := componentConfig.(*Config) - if cfg.Traces.Level == configtelemetry.LevelNone { - set.Logger.Info("Internal trace telemetry disabled") - return &noopNoContextTracerProvider{}, nil - } - - propagator, err := textMapPropagatorFromConfig(cfg.Traces.Propagators) - if err != nil { - return nil, fmt.Errorf("error creating propagator: %w", err) - } - otel.SetTextMapPropagator(propagator) - - res := newResource(set.Settings, cfg) - sdk, err := newSDK(ctx, res, config.OpenTelemetryConfiguration{ - TracerProvider: &cfg.Traces.TracerProvider, - }) - if err != nil { - return nil, err - } - return sdk.TracerProvider().(telemetry.TracerProvider), nil -} - -var errUnsupportedPropagator = errors.New("unsupported trace propagator") - -type noopNoContextTracer struct { - embedded.Tracer -} - -var noopSpan = noop.Span{} - -func (n *noopNoContextTracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { - return ctx, noopSpan -} - -type noopNoContextTracerProvider struct { - embedded.TracerProvider -} - -func (n *noopNoContextTracerProvider) Shutdown(_ context.Context) error { - return nil -} - -func (n *noopNoContextTracerProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer { - return &noopNoContextTracer{} -} - -func textMapPropagatorFromConfig(props []string) (propagation.TextMapPropagator, error) { - var textMapPropagators []propagation.TextMapPropagator - for _, prop := range props { - switch prop { - case traceContextPropagator: - textMapPropagators = append(textMapPropagators, propagation.TraceContext{}) - case b3Propagator: - textMapPropagators = append(textMapPropagators, b3.New()) - default: - return nil, errUnsupportedPropagator - } - } - return propagation.NewCompositeTextMapPropagator(textMapPropagators...), nil -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go b/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go index 11f770f41af..fcc0d6cf0cb 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go @@ -23,7 +23,18 @@ type LoggerSettings struct { Settings // ZapOptions contains options for creating the zap logger. 
+ // + // Deprecated [v0.142.0]: use BuildZapLogger instead. + // This field will be removed in the future, and options + // must be injected through BuildZapLogger. ZapOptions []zap.Option + + // BuildZapLogger holds a function for building a *zap.Logger + // from a zap.Config and options. + // + // If BuildZapLogger is nil, zap.Config.Build should be used. + // NOTE: in the future this field will be required. + BuildZapLogger func(zap.Config, ...zap.Option) (*zap.Logger, error) } // MeterSettings holds settings for building meter providers. @@ -60,6 +71,9 @@ type TracerSettings struct { type Settings struct { // BuildInfo contains build information about the collector. BuildInfo component.BuildInfo + + // Resource is the telemetry resource that should be used by all telemetry providers. + Resource *pcommon.Resource } // Factory is a factory interface for internal telemetry. diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md deleted file mode 100644 index 5565260ae55..00000000000 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# OpenTelemetry Zap Log Bridge - -[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/contrib/bridges/otelzap.svg)](https://pkg.go.dev/go.opentelemetry.io/contrib/bridges/otelzap) diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go deleted file mode 100644 index 0606775cd03..00000000000 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go +++ /dev/null @@ -1,128 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/logutil/convert.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" - -import ( - "fmt" - "math" - "reflect" - "strconv" - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/log" -) - -// convertValue converts various types to log.Value. -func convertValue(v any) log.Value { - // Handling the most common types without reflect is a small perf win. 
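
// Aside (not part of the vendored code), illustrating the mapping performed
// by the switch below on a few hypothetical inputs:
//
//	convertValue(true)              // log.BoolValue(true)
//	convertValue(3 * time.Second)   // log.Int64Value(3000000000), nanoseconds
//	convertValue(complex(1.0, 2.0)) // log.MapValue(r=1, i=2)
//	convertValue(uint64(1) << 63)   // log.StringValue("9223372036854775808"), exceeds MaxInt64
//	convertValue([]byte("ab"))      // log.BytesValue([]byte("ab"))
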
- switch val := v.(type) { - case bool: - return log.BoolValue(val) - case string: - return log.StringValue(val) - case int: - return log.Int64Value(int64(val)) - case int8: - return log.Int64Value(int64(val)) - case int16: - return log.Int64Value(int64(val)) - case int32: - return log.Int64Value(int64(val)) - case int64: - return log.Int64Value(val) - case uint: - return convertUintValue(uint64(val)) - case uint8: - return log.Int64Value(int64(val)) - case uint16: - return log.Int64Value(int64(val)) - case uint32: - return log.Int64Value(int64(val)) - case uint64: - return convertUintValue(val) - case uintptr: - return convertUintValue(uint64(val)) - case float32: - return log.Float64Value(float64(val)) - case float64: - return log.Float64Value(val) - case time.Duration: - return log.Int64Value(val.Nanoseconds()) - case complex64: - r := log.Float64("r", real(complex128(val))) - i := log.Float64("i", imag(complex128(val))) - return log.MapValue(r, i) - case complex128: - r := log.Float64("r", real(val)) - i := log.Float64("i", imag(val)) - return log.MapValue(r, i) - case time.Time: - return log.Int64Value(val.UnixNano()) - case []byte: - return log.BytesValue(val) - case error: - return log.StringValue(val.Error()) - case attribute.Value: - return log.ValueFromAttribute(val) - case log.Value: - return val - } - - t := reflect.TypeOf(v) - if t == nil { - return log.Value{} - } - val := reflect.ValueOf(v) - switch t.Kind() { - case reflect.Struct: - return log.StringValue(fmt.Sprintf("%+v", v)) - case reflect.Slice, reflect.Array: - items := make([]log.Value, 0, val.Len()) - for i := 0; i < val.Len(); i++ { - items = append(items, convertValue(val.Index(i).Interface())) - } - return log.SliceValue(items...) - case reflect.Map: - kvs := make([]log.KeyValue, 0, val.Len()) - for _, k := range val.MapKeys() { - var key string - switch k.Kind() { - case reflect.String: - key = k.String() - default: - key = fmt.Sprintf("%+v", k.Interface()) - } - kvs = append(kvs, log.KeyValue{ - Key: key, - Value: convertValue(val.MapIndex(k).Interface()), - }) - } - return log.MapValue(kvs...) - case reflect.Ptr, reflect.Interface: - if val.IsNil() { - return log.Value{} - } - return convertValue(val.Elem().Interface()) - } - - // Try to handle this as gracefully as possible. - // - // Don't panic here. it is preferable to have user's open issue - // asking why their attributes have a "unhandled: " prefix than - // say that their code is panicking. - return log.StringValue(fmt.Sprintf("unhandled: (%s) %+v", t, v)) -} - -// convertUintValue converts a uint64 to a log.Value. -// If the value is too large to fit in an int64, it is converted to a string. -func convertUintValue(v uint64) log.Value { - if v > math.MaxInt64 { - return log.StringValue(strconv.FormatUint(v, 10)) - } - return log.Int64Value(int64(v)) -} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go deleted file mode 100644 index 0cae1efb44f..00000000000 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package otelzap provides a bridge between the [go.uber.org/zap] and -// [OpenTelemetry]. -// -// # Record Conversion -// -// The [zapcore.Entry] and [zapcore.Field] are converted to OpenTelemetry [log.Record] in the following -// way: -// -// - Time is set as the Timestamp. -// - Message is set as the Body using a [log.StringValue]. 
-// - Level is transformed and set as the Severity. The SeverityText is also -// set. -// - Fields are transformed and set as the Attributes. -// - Field value of type [context.Context] is used as context when emitting log records. -// - For named loggers, LoggerName is used to access [log.Logger] from [log.LoggerProvider] -// -// The Level is transformed to the OpenTelemetry Severity types in the following way. -// -// - [zapcore.DebugLevel] is transformed to [log.SeverityDebug] -// - [zapcore.InfoLevel] is transformed to [log.SeverityInfo] -// - [zapcore.WarnLevel] is transformed to [log.SeverityWarn] -// - [zapcore.ErrorLevel] is transformed to [log.SeverityError] -// - [zapcore.DPanicLevel] is transformed to [log.SeverityFatal1] -// - [zapcore.PanicLevel] is transformed to [log.SeverityFatal2] -// - [zapcore.FatalLevel] is transformed to [log.SeverityFatal3] -// -// Fields are transformed based on their type into log attributes, or -// into a string value encoded using [fmt.Sprintf] if there is no matching type. -// -// [OpenTelemetry]: https://opentelemetry.io/docs/concepts/signals/logs/ -package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" - -import ( - "context" - "slices" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/log/global" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.uber.org/zap/zapcore" -) - -type config struct { - provider log.LoggerProvider - version string - schemaURL string - attributes []attribute.KeyValue -} - -func newConfig(options []Option) config { - var c config - for _, opt := range options { - c = opt.apply(c) - } - - if c.provider == nil { - c.provider = global.GetLoggerProvider() - } - - return c -} - -// Option configures a [Core]. -type Option interface { - apply(config) config -} - -type optFunc func(config) config - -func (f optFunc) apply(c config) config { return f(c) } - -// WithVersion returns an [Option] that configures the version of the -// [log.Logger] used by a [Core]. The version should be the version of the -// package that is being logged. -func WithVersion(version string) Option { - return optFunc(func(c config) config { - c.version = version - return c - }) -} - -// WithSchemaURL returns an [Option] that configures the semantic convention -// schema URL of the [log.Logger] used by a [Core]. The schemaURL should be -// the schema URL for the semantic conventions used in log records. -func WithSchemaURL(schemaURL string) Option { - return optFunc(func(c config) config { - c.schemaURL = schemaURL - return c - }) -} - -// WithAttributes returns an [Option] that configures the instrumentation scope -// attributes of the [log.Logger] used by a [Core]. -func WithAttributes(attributes ...attribute.KeyValue) Option { - return optFunc(func(c config) config { - c.attributes = attributes - return c - }) -} - -// WithLoggerProvider returns an [Option] that configures [log.LoggerProvider] -// used by a [Core] to create its [log.Logger]. -// -// By default if this Option is not provided, the Handler will use the global -// LoggerProvider. -func WithLoggerProvider(provider log.LoggerProvider) Option { - return optFunc(func(c config) config { - c.provider = provider - return c - }) -} - -// Core is a [zapcore.Core] that sends logging records to OpenTelemetry. -type Core struct { - provider log.LoggerProvider - logger log.Logger - opts []log.LoggerOption - attr []log.KeyValue - ctx context.Context -} - -// Compile-time check *Core implements zapcore.Core. 
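
// Aside (not part of the vendored code): typical wiring of this bridge, with
// a hypothetical module path as the logger name:
//
//	provider := global.GetLoggerProvider() // or a configured SDK LoggerProvider
//	core := otelzap.NewCore("github.com/example/app",
//		otelzap.WithLoggerProvider(provider))
//	logger := zap.New(core)
//	logger.Info("hello", zap.String("user", "alice"))
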
-var _ zapcore.Core = (*Core)(nil) - -// NewCore creates a new [zapcore.Core] that can be used with [go.uber.org/zap.New]. -// The name should be the package import path that is being logged. -// The name is ignored for named loggers created using [go.uber.org/zap.Logger.Named]. -func NewCore(name string, opts ...Option) *Core { - cfg := newConfig(opts) - - var loggerOpts []log.LoggerOption - if cfg.version != "" { - loggerOpts = append(loggerOpts, log.WithInstrumentationVersion(cfg.version)) - } - if cfg.schemaURL != "" { - loggerOpts = append(loggerOpts, log.WithSchemaURL(cfg.schemaURL)) - } - if cfg.attributes != nil { - loggerOpts = append(loggerOpts, log.WithInstrumentationAttributes(cfg.attributes...)) - } - - logger := cfg.provider.Logger(name, loggerOpts...) - - return &Core{ - provider: cfg.provider, - logger: logger, - opts: loggerOpts, - ctx: context.Background(), - } -} - -// Enabled decides whether a given logging level is enabled when logging a message. -func (o *Core) Enabled(level zapcore.Level) bool { - param := log.EnabledParameters{Severity: convertLevel(level)} - return o.logger.Enabled(context.Background(), param) -} - -// With adds structured context to the Core. -func (o *Core) With(fields []zapcore.Field) zapcore.Core { - cloned := o.clone() - if len(fields) > 0 { - ctx, attrbuf := convertField(fields) - if ctx != nil { - cloned.ctx = ctx - } - cloned.attr = append(cloned.attr, attrbuf...) - } - return cloned -} - -func (o *Core) clone() *Core { - return &Core{ - provider: o.provider, - opts: o.opts, - logger: o.logger, - attr: slices.Clone(o.attr), - ctx: o.ctx, - } -} - -// Sync flushes buffered logs (if any). -func (*Core) Sync() error { - return nil -} - -// Check determines whether the supplied Entry should be logged. -// If the entry should be logged, the Core adds itself to the CheckedEntry and returns the result. -func (o *Core) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - param := log.EnabledParameters{Severity: convertLevel(ent.Level)} - - logger := o.logger - if ent.LoggerName != "" { - logger = o.provider.Logger(ent.LoggerName, o.opts...) - } - - if logger.Enabled(context.Background(), param) { - return ce.AddCore(ent, o) - } - return ce -} - -// Write method encodes zap fields to OTel logs and emits them. -func (o *Core) Write(ent zapcore.Entry, fields []zapcore.Field) error { - r := log.Record{} - r.SetTimestamp(ent.Time) - r.SetBody(log.StringValue(ent.Message)) - r.SetSeverity(convertLevel(ent.Level)) - r.SetSeverityText(ent.Level.String()) - - r.AddAttributes(o.attr...) - if ent.Caller.Defined { - r.AddAttributes( - log.String(string(semconv.CodeFilePathKey), ent.Caller.File), - log.Int(string(semconv.CodeLineNumberKey), ent.Caller.Line), - log.String(string(semconv.CodeFunctionNameKey), ent.Caller.Function), - ) - } - if ent.Stack != "" { - r.AddAttributes(log.String(string(semconv.CodeStacktraceKey), ent.Stack)) - } - emitCtx := o.ctx - if len(fields) > 0 { - ctx, attrbuf := convertField(fields) - if ctx != nil { - emitCtx = ctx - } - r.AddAttributes(attrbuf...) - } - - logger := o.logger - if ent.LoggerName != "" { - logger = o.provider.Logger(ent.LoggerName, o.opts...) 
- } - logger.Emit(emitCtx, r) - return nil -} - -func convertField(fields []zapcore.Field) (context.Context, []log.KeyValue) { - var ctx context.Context - enc := newObjectEncoder(len(fields)) - for _, field := range fields { - if ctxFld, ok := field.Interface.(context.Context); ok { - ctx = ctxFld - continue - } - field.AddTo(enc) - } - - enc.calculate(enc.root) - return ctx, enc.root.attrs -} - -func convertLevel(level zapcore.Level) log.Severity { - switch level { - case zapcore.DebugLevel: - return log.SeverityDebug - case zapcore.InfoLevel: - return log.SeverityInfo - case zapcore.WarnLevel: - return log.SeverityWarn - case zapcore.ErrorLevel: - return log.SeverityError - case zapcore.DPanicLevel: - return log.SeverityFatal1 - case zapcore.PanicLevel: - return log.SeverityFatal2 - case zapcore.FatalLevel: - return log.SeverityFatal3 - default: - return log.SeverityUndefined - } -} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go deleted file mode 100644 index 8d868e088eb..00000000000 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" - -import ( - "time" - - "go.opentelemetry.io/otel/log" - "go.uber.org/zap/zapcore" -) - -var ( - _ zapcore.ObjectEncoder = (*objectEncoder)(nil) - _ zapcore.ArrayEncoder = (*arrayEncoder)(nil) -) - -type namespace struct { - name string - attrs []log.KeyValue - next *namespace -} - -// objectEncoder implements zapcore.ObjectEncoder. -// It encodes given fields to OTel key-values. -type objectEncoder struct { - // root is a pointer to the default namespace - root *namespace - // cur is a pointer to the namespace we're currently writing to. - cur *namespace -} - -func newObjectEncoder(n int) *objectEncoder { - keyval := make([]log.KeyValue, 0, n) - m := &namespace{ - attrs: keyval, - } - return &objectEncoder{ - root: m, - cur: m, - } -} - -// It iterates to the end of the linked list and appends namespace data. -// Run this function before accessing complete result. -func (m *objectEncoder) calculate(o *namespace) { - if o.next == nil { - return - } - m.calculate(o.next) - o.attrs = append(o.attrs, log.Map(o.next.name, o.next.attrs...)) -} - -func (m *objectEncoder) AddArray(key string, v zapcore.ArrayMarshaler) error { - arr := newArrayEncoder() - err := v.MarshalLogArray(arr) - m.cur.attrs = append(m.cur.attrs, log.Slice(key, arr.elems...)) - return err -} - -func (m *objectEncoder) AddObject(k string, v zapcore.ObjectMarshaler) error { - // Similar to console_encoder which uses capacity of 2: - // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. 
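
// Aside (not part of the vendored code): AddObject turns a
// zapcore.ObjectMarshaler into a nested log.Map attribute. A hypothetical
// marshaler and the attribute it produces:
//
//	type point struct{ X, Y int }
//
//	func (p point) MarshalLogObject(e zapcore.ObjectEncoder) error {
//		e.AddInt("x", p.X)
//		e.AddInt("y", p.Y)
//		return nil
//	}
//
//	// enc.AddObject("p", point{1, 2}) appends the attribute p = {x: 1, y: 2}
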
- newobj := newObjectEncoder(2) - err := v.MarshalLogObject(newobj) - newobj.calculate(newobj.root) - m.cur.attrs = append(m.cur.attrs, log.Map(k, newobj.root.attrs...)) - return err -} - -func (m *objectEncoder) AddBinary(k string, v []byte) { - m.cur.attrs = append(m.cur.attrs, log.Bytes(k, v)) -} - -func (m *objectEncoder) AddByteString(k string, v []byte) { - m.cur.attrs = append(m.cur.attrs, log.String(k, string(v))) -} - -func (m *objectEncoder) AddBool(k string, v bool) { - m.cur.attrs = append(m.cur.attrs, log.Bool(k, v)) -} - -func (m *objectEncoder) AddDuration(k string, v time.Duration) { - m.AddInt64(k, v.Nanoseconds()) -} - -func (m *objectEncoder) AddComplex128(k string, v complex128) { - r := log.Float64("r", real(v)) - i := log.Float64("i", imag(v)) - m.cur.attrs = append(m.cur.attrs, log.Map(k, r, i)) -} - -func (m *objectEncoder) AddFloat64(k string, v float64) { - m.cur.attrs = append(m.cur.attrs, log.Float64(k, v)) -} - -func (m *objectEncoder) AddInt64(k string, v int64) { - m.cur.attrs = append(m.cur.attrs, log.Int64(k, v)) -} - -func (m *objectEncoder) AddInt(k string, v int) { - m.cur.attrs = append(m.cur.attrs, log.Int(k, v)) -} - -func (m *objectEncoder) AddString(k, v string) { - m.cur.attrs = append(m.cur.attrs, log.String(k, v)) -} - -func (m *objectEncoder) AddUint64(k string, v uint64) { - m.cur.attrs = append(m.cur.attrs, - log.KeyValue{ - Key: k, - Value: assignUintValue(v), - }) -} - -func (m *objectEncoder) AddReflected(k string, v any) error { - m.cur.attrs = append(m.cur.attrs, - log.KeyValue{ - Key: k, - Value: convertValue(v), - }) - return nil -} - -// OpenNamespace opens an isolated namespace where all subsequent fields will -// be added. -func (m *objectEncoder) OpenNamespace(k string) { - keyValue := make([]log.KeyValue, 0, 5) - s := &namespace{ - name: k, - attrs: keyValue, - } - m.cur.next = s - m.cur = s -} - -func (m *objectEncoder) AddComplex64(k string, v complex64) { - m.AddComplex128(k, complex128(v)) -} - -func (m *objectEncoder) AddTime(k string, v time.Time) { - m.AddInt64(k, v.UnixNano()) -} - -func (m *objectEncoder) AddFloat32(k string, v float32) { - m.AddFloat64(k, float64(v)) -} - -func (m *objectEncoder) AddInt32(k string, v int32) { - m.AddInt64(k, int64(v)) -} - -func (m *objectEncoder) AddInt16(k string, v int16) { - m.AddInt64(k, int64(v)) -} - -func (m *objectEncoder) AddInt8(k string, v int8) { - m.AddInt64(k, int64(v)) -} - -func (m *objectEncoder) AddUint(k string, v uint) { - m.AddUint64(k, uint64(v)) -} - -func (m *objectEncoder) AddUint32(k string, v uint32) { - m.AddInt64(k, int64(v)) -} - -func (m *objectEncoder) AddUint16(k string, v uint16) { - m.AddInt64(k, int64(v)) -} - -func (m *objectEncoder) AddUint8(k string, v uint8) { - m.AddInt64(k, int64(v)) -} - -func (m *objectEncoder) AddUintptr(k string, v uintptr) { - m.AddUint64(k, uint64(v)) -} - -func assignUintValue(v uint64) log.Value { - const maxInt64 = ^uint64(0) >> 1 - if v > maxInt64 { - return log.Float64Value(float64(v)) - } - return log.Int64Value(int64(v)) -} - -// arrayEncoder implements [zapcore.ArrayEncoder]. -type arrayEncoder struct { - elems []log.Value -} - -func newArrayEncoder() *arrayEncoder { - return &arrayEncoder{ - // Similar to console_encoder which uses capacity of 2: - // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. 
- elems: make([]log.Value, 0, 2), - } -} - -func (a *arrayEncoder) AppendArray(v zapcore.ArrayMarshaler) error { - arr := newArrayEncoder() - err := v.MarshalLogArray(arr) - a.elems = append(a.elems, log.SliceValue(arr.elems...)) - return err -} - -func (a *arrayEncoder) AppendObject(v zapcore.ObjectMarshaler) error { - // Similar to console_encoder which uses capacity of 2: - // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. - m := newObjectEncoder(2) - err := v.MarshalLogObject(m) - m.calculate(m.root) - a.elems = append(a.elems, log.MapValue(m.root.attrs...)) - return err -} - -func (a *arrayEncoder) AppendReflected(v any) error { - a.elems = append(a.elems, convertValue(v)) - return nil -} - -func (a *arrayEncoder) AppendByteString(v []byte) { - a.elems = append(a.elems, log.StringValue(string(v))) -} - -func (a *arrayEncoder) AppendBool(v bool) { - a.elems = append(a.elems, log.BoolValue(v)) -} - -func (a *arrayEncoder) AppendFloat64(v float64) { - a.elems = append(a.elems, log.Float64Value(v)) -} - -func (a *arrayEncoder) AppendFloat32(v float32) { - a.AppendFloat64(float64(v)) -} - -func (a *arrayEncoder) AppendInt(v int) { - a.elems = append(a.elems, log.IntValue(v)) -} - -func (a *arrayEncoder) AppendInt64(v int64) { - a.elems = append(a.elems, log.Int64Value(v)) -} - -func (a *arrayEncoder) AppendString(v string) { - a.elems = append(a.elems, log.StringValue(v)) -} - -func (a *arrayEncoder) AppendComplex128(v complex128) { - r := log.Float64("r", real(v)) - i := log.Float64("i", imag(v)) - a.elems = append(a.elems, log.MapValue(r, i)) -} - -func (a *arrayEncoder) AppendUint64(v uint64) { - a.elems = append(a.elems, assignUintValue(v)) -} - -func (a *arrayEncoder) AppendComplex64(v complex64) { a.AppendComplex128(complex128(v)) } -func (a *arrayEncoder) AppendDuration(v time.Duration) { a.AppendInt64(v.Nanoseconds()) } -func (a *arrayEncoder) AppendInt32(v int32) { a.AppendInt64(int64(v)) } -func (a *arrayEncoder) AppendInt16(v int16) { a.AppendInt64(int64(v)) } -func (a *arrayEncoder) AppendInt8(v int8) { a.AppendInt64(int64(v)) } -func (a *arrayEncoder) AppendTime(v time.Time) { a.AppendInt64(v.UnixNano()) } -func (a *arrayEncoder) AppendUint(v uint) { a.AppendUint64(uint64(v)) } -func (a *arrayEncoder) AppendUint32(v uint32) { a.AppendInt64(int64(v)) } -func (a *arrayEncoder) AppendUint16(v uint16) { a.AppendInt64(int64(v)) } -func (a *arrayEncoder) AppendUint8(v uint8) { a.AppendInt64(int64(v)) } -func (a *arrayEncoder) AppendUintptr(v uintptr) { a.AppendUint64(uint64(v)) } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go deleted file mode 100644 index 5c8b2eea7e4..00000000000 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" - -// Generate convert: -//go:generate gotmpl --body=../../internal/shared/logutil/convert_test.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert_test.go -//go:generate gotmpl --body=../../internal/shared/logutil/convert.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert.go diff --git a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/config.go b/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/config.go deleted file mode 100644 index 1e989f5b8c7..00000000000 --- 
a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/config.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package otelconf provides an OpenTelemetry declarative configuration SDK. -package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.2.0" - -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/log" - nooplog "go.opentelemetry.io/otel/log/noop" - "go.opentelemetry.io/otel/metric" - noopmetric "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/trace" - nooptrace "go.opentelemetry.io/otel/trace/noop" - yaml "go.yaml.in/yaml/v3" -) - -const ( - protocolProtobufHTTP = "http/protobuf" - protocolProtobufGRPC = "grpc/protobuf" - - compressionGzip = "gzip" - compressionNone = "none" -) - -type configOptions struct { - ctx context.Context - opentelemetryConfig OpenTelemetryConfiguration -} - -type shutdownFunc func(context.Context) error - -func noopShutdown(context.Context) error { - return nil -} - -// SDK is a struct that contains all the providers -// configured via the configuration model. -type SDK struct { - meterProvider metric.MeterProvider - tracerProvider trace.TracerProvider - loggerProvider log.LoggerProvider - shutdown shutdownFunc -} - -// TracerProvider returns a configured trace.TracerProvider. -func (s *SDK) TracerProvider() trace.TracerProvider { - return s.tracerProvider -} - -// MeterProvider returns a configured metric.MeterProvider. -func (s *SDK) MeterProvider() metric.MeterProvider { - return s.meterProvider -} - -// LoggerProvider returns a configured log.LoggerProvider. -func (s *SDK) LoggerProvider() log.LoggerProvider { - return s.loggerProvider -} - -// Shutdown calls shutdown on all configured providers. -func (s *SDK) Shutdown(ctx context.Context) error { - return s.shutdown(ctx) -} - -var noopSDK = SDK{ - loggerProvider: nooplog.LoggerProvider{}, - meterProvider: noopmetric.MeterProvider{}, - tracerProvider: nooptrace.TracerProvider{}, - shutdown: func(context.Context) error { return nil }, -} - -// NewSDK creates SDK providers based on the configuration model. -func NewSDK(opts ...ConfigurationOption) (SDK, error) { - o := configOptions{} - for _, opt := range opts { - o = opt.apply(o) - } - if o.opentelemetryConfig.Disabled != nil && *o.opentelemetryConfig.Disabled { - return noopSDK, nil - } - - r, err := newResource(o.opentelemetryConfig.Resource) - if err != nil { - return noopSDK, err - } - - mp, mpShutdown, err := meterProvider(o, r) - if err != nil { - return noopSDK, err - } - - tp, tpShutdown, err := tracerProvider(o, r) - if err != nil { - return noopSDK, err - } - - lp, lpShutdown, err := loggerProvider(o, r) - if err != nil { - return noopSDK, err - } - - return SDK{ - meterProvider: mp, - tracerProvider: tp, - loggerProvider: lp, - shutdown: func(ctx context.Context) error { - return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx)) - }, - }, nil -} - -// ConfigurationOption configures options for providers. -type ConfigurationOption interface { - apply(configOptions) configOptions -} - -type configurationOptionFunc func(configOptions) configOptions - -func (fn configurationOptionFunc) apply(cfg configOptions) configOptions { - return fn(cfg) -} - -// WithContext sets the context.Context for the SDK. 
-func WithContext(ctx context.Context) ConfigurationOption { - return configurationOptionFunc(func(c configOptions) configOptions { - c.ctx = ctx - return c - }) -} - -// WithOpenTelemetryConfiguration sets the OpenTelemetryConfiguration used -// to produce the SDK. -func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) ConfigurationOption { - return configurationOptionFunc(func(c configOptions) configOptions { - c.opentelemetryConfig = cfg - return c - }) -} - -// ParseYAML parses a YAML configuration file into an OpenTelemetryConfiguration. -func ParseYAML(file []byte) (*OpenTelemetryConfiguration, error) { - var cfg OpenTelemetryConfiguration - err := yaml.Unmarshal(file, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} diff --git a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/generated_config.go b/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/generated_config.go deleted file mode 100644 index d437199519d..00000000000 --- a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/generated_config.go +++ /dev/null @@ -1,780 +0,0 @@ -// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT. - -package otelconf - -import "encoding/json" -import "fmt" -import "reflect" - -type AttributeLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". - AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". - AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` - - AdditionalProperties interface{} -} - -type Attributes map[string]interface{} - -type BatchLogRecordProcessor struct { - // ExportTimeout corresponds to the JSON schema field "export_timeout". - ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` - - // Exporter corresponds to the JSON schema field "exporter". - Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // MaxExportBatchSize corresponds to the JSON schema field - // "max_export_batch_size". - MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` - - // MaxQueueSize corresponds to the JSON schema field "max_queue_size". - MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` - - // ScheduleDelay corresponds to the JSON schema field "schedule_delay". - ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
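
The removed config.go above is the package's whole public surface: ParseYAML decodes a configuration document and NewSDK turns it into providers, with Shutdown joining the per-signal shutdown functions. Under those definitions, consumption looked roughly like this (file path and error handling are illustrative):

```go
package main

import (
	"context"
	"log"
	"os"

	otelconf "go.opentelemetry.io/contrib/otelconf/v0.2.0"
)

func main() {
	ctx := context.Background()

	raw, err := os.ReadFile("otel-config.yaml") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	cfg, err := otelconf.ParseYAML(raw)
	if err != nil {
		log.Fatal(err)
	}

	sdk, err := otelconf.NewSDK(
		otelconf.WithContext(ctx),
		otelconf.WithOpenTelemetryConfiguration(*cfg),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Flushes and stops every configured provider.
	defer func() { _ = sdk.Shutdown(ctx) }()

	_ = sdk.TracerProvider() // hand to instrumentation
}
```
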
-func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in BatchLogRecordProcessor: required") - } - type Plain BatchLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchLogRecordProcessor(plain) - return nil -} - -type BatchSpanProcessor struct { - // ExportTimeout corresponds to the JSON schema field "export_timeout". - ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` - - // Exporter corresponds to the JSON schema field "exporter". - Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // MaxExportBatchSize corresponds to the JSON schema field - // "max_export_batch_size". - MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` - - // MaxQueueSize corresponds to the JSON schema field "max_queue_size". - MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` - - // ScheduleDelay corresponds to the JSON schema field "schedule_delay". - ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in BatchSpanProcessor: required") - } - type Plain BatchSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchSpanProcessor(plain) - return nil -} - -type Common map[string]interface{} - -type Console map[string]interface{} - -type Detectors struct { - // Attributes corresponds to the JSON schema field "attributes". - Attributes *DetectorsAttributes `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` -} - -type DetectorsAttributes struct { - // Excluded corresponds to the JSON schema field "excluded". - Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` - - // Included corresponds to the JSON schema field "included". - Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` -} - -type Headers map[string]string - -type IncludeExclude struct { - // Excluded corresponds to the JSON schema field "excluded". - Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` - - // Included corresponds to the JSON schema field "included". - Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` -} - -type LogRecordExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". 
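
All of the generated unmarshalers in this file follow one pattern: decode into a raw map to check required keys, then decode into `type Plain X`, a named alias that carries the fields but not the methods, so the second json.Unmarshal cannot recurse into UnmarshalJSON. A distilled version of the trick with a hypothetical type of the same shape:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Widget is a stand-in for the generated config structs above.
type Widget struct {
	Name string `json:"name"`
	Size *int   `json:"size,omitempty"`
}

func (w *Widget) UnmarshalJSON(b []byte) error {
	var raw map[string]interface{}
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	if _, ok := raw["name"]; raw != nil && !ok {
		return fmt.Errorf("field name in Widget: required")
	}
	// Plain has Widget's fields but not its methods, so this call uses the
	// default decoder instead of recursing into this UnmarshalJSON.
	type Plain Widget
	var plain Plain
	if err := json.Unmarshal(b, &plain); err != nil {
		return err
	}
	*w = Widget(plain)
	return nil
}

func main() {
	var w Widget
	fmt.Println(json.Unmarshal([]byte(`{"size": 3}`), &w)) // field name in Widget: required
}
```
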
- OTLP *OTLP `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - AdditionalProperties interface{} -} - -type LogRecordLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". - AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". - AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` -} - -type LogRecordProcessor struct { - // Batch corresponds to the JSON schema field "batch". - Batch *BatchLogRecordProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` - - // Simple corresponds to the JSON schema field "simple". - Simple *SimpleLogRecordProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` - - AdditionalProperties interface{} -} - -type LoggerProvider struct { - // Limits corresponds to the JSON schema field "limits". - Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Processors corresponds to the JSON schema field "processors". - Processors []LogRecordProcessor `json:"processors,omitempty" yaml:"processors,omitempty" mapstructure:"processors,omitempty"` -} - -type MeterProvider struct { - // Readers corresponds to the JSON schema field "readers". - Readers []MetricReader `json:"readers,omitempty" yaml:"readers,omitempty" mapstructure:"readers,omitempty"` - - // Views corresponds to the JSON schema field "views". - Views []View `json:"views,omitempty" yaml:"views,omitempty" mapstructure:"views,omitempty"` -} - -type MetricExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLPMetric `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *Prometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - AdditionalProperties interface{} -} - -type MetricReader struct { - // Periodic corresponds to the JSON schema field "periodic". - Periodic *PeriodicMetricReader `json:"periodic,omitempty" yaml:"periodic,omitempty" mapstructure:"periodic,omitempty"` - - // Pull corresponds to the JSON schema field "pull". - Pull *PullMetricReader `json:"pull,omitempty" yaml:"pull,omitempty" mapstructure:"pull,omitempty"` -} - -type OTLP struct { - // Certificate corresponds to the JSON schema field "certificate". - Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` - - // ClientCertificate corresponds to the JSON schema field "client_certificate". - ClientCertificate *string `json:"client_certificate,omitempty" yaml:"client_certificate,omitempty" mapstructure:"client_certificate,omitempty"` - - // ClientKey corresponds to the JSON schema field "client_key". - ClientKey *string `json:"client_key,omitempty" yaml:"client_key,omitempty" mapstructure:"client_key,omitempty"` - - // Compression corresponds to the JSON schema field "compression". 
- Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` - - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Headers corresponds to the JSON schema field "headers". - Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` - - // Insecure corresponds to the JSON schema field "insecure". - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -type OTLPMetric struct { - // Certificate corresponds to the JSON schema field "certificate". - Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` - - // ClientCertificate corresponds to the JSON schema field "client_certificate". - ClientCertificate *string `json:"client_certificate,omitempty" yaml:"client_certificate,omitempty" mapstructure:"client_certificate,omitempty"` - - // ClientKey corresponds to the JSON schema field "client_key". - ClientKey *string `json:"client_key,omitempty" yaml:"client_key,omitempty" mapstructure:"client_key,omitempty"` - - // Compression corresponds to the JSON schema field "compression". - Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` - - // DefaultHistogramAggregation corresponds to the JSON schema field - // "default_histogram_aggregation". - DefaultHistogramAggregation *OTLPMetricDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` - - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Headers corresponds to the JSON schema field "headers". - Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` - - // Insecure corresponds to the JSON schema field "insecure". - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // TemporalityPreference corresponds to the JSON schema field - // "temporality_preference". - TemporalityPreference *string `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` - - // Timeout corresponds to the JSON schema field "timeout". 
- Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -type OTLPMetricDefaultHistogramAggregation string - -const OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram OTLPMetricDefaultHistogramAggregation = "base2_exponential_bucket_histogram" -const OTLPMetricDefaultHistogramAggregationExplicitBucketHistogram OTLPMetricDefaultHistogramAggregation = "explicit_bucket_histogram" - -var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{ - "explicit_bucket_histogram", - "base2_exponential_bucket_histogram", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v) - } - *j = OTLPMetricDefaultHistogramAggregation(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLPMetric) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in OTLPMetric: required") - } - if _, ok := raw["protocol"]; raw != nil && !ok { - return fmt.Errorf("field protocol in OTLPMetric: required") - } - type Plain OTLPMetric - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLPMetric(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLP) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in OTLP: required") - } - if _, ok := raw["protocol"]; raw != nil && !ok { - return fmt.Errorf("field protocol in OTLP: required") - } - type Plain OTLP - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLP(plain) - return nil -} - -type OpenTelemetryConfiguration struct { - // AttributeLimits corresponds to the JSON schema field "attribute_limits". - AttributeLimits *AttributeLimits `json:"attribute_limits,omitempty" yaml:"attribute_limits,omitempty" mapstructure:"attribute_limits,omitempty"` - - // Disabled corresponds to the JSON schema field "disabled". - Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` - - // FileFormat corresponds to the JSON schema field "file_format". - FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` - - // LoggerProvider corresponds to the JSON schema field "logger_provider". - LoggerProvider *LoggerProvider `json:"logger_provider,omitempty" yaml:"logger_provider,omitempty" mapstructure:"logger_provider,omitempty"` - - // MeterProvider corresponds to the JSON schema field "meter_provider". - MeterProvider *MeterProvider `json:"meter_provider,omitempty" yaml:"meter_provider,omitempty" mapstructure:"meter_provider,omitempty"` - - // Propagator corresponds to the JSON schema field "propagator". 
- Propagator *Propagator `json:"propagator,omitempty" yaml:"propagator,omitempty" mapstructure:"propagator,omitempty"` - - // Resource corresponds to the JSON schema field "resource". - Resource *Resource `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"` - - // TracerProvider corresponds to the JSON schema field "tracer_provider". - TracerProvider *TracerProvider `json:"tracer_provider,omitempty" yaml:"tracer_provider,omitempty" mapstructure:"tracer_provider,omitempty"` - - AdditionalProperties interface{} -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["file_format"]; raw != nil && !ok { - return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required") - } - type Plain OpenTelemetryConfiguration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OpenTelemetryConfiguration(plain) - return nil -} - -type PeriodicMetricReader struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter MetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // Interval corresponds to the JSON schema field "interval". - Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in PeriodicMetricReader: required") - } - type Plain PeriodicMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PeriodicMetricReader(plain) - return nil -} - -type Prometheus struct { - // Host corresponds to the JSON schema field "host". - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // Port corresponds to the JSON schema field "port". - Port *int `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` - - // WithResourceConstantLabels corresponds to the JSON schema field - // "with_resource_constant_labels". - WithResourceConstantLabels *IncludeExclude `json:"with_resource_constant_labels,omitempty" yaml:"with_resource_constant_labels,omitempty" mapstructure:"with_resource_constant_labels,omitempty"` - - // WithoutScopeInfo corresponds to the JSON schema field "without_scope_info". - WithoutScopeInfo *bool `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"` - - // WithoutTypeSuffix corresponds to the JSON schema field "without_type_suffix". - WithoutTypeSuffix *bool `json:"without_type_suffix,omitempty" yaml:"without_type_suffix,omitempty" mapstructure:"without_type_suffix,omitempty"` - - // WithoutUnits corresponds to the JSON schema field "without_units". - WithoutUnits *bool `json:"without_units,omitempty" yaml:"without_units,omitempty" mapstructure:"without_units,omitempty"` -} - -type Propagator struct { - // Composite corresponds to the JSON schema field "composite". 
- Composite []string `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"` - - AdditionalProperties interface{} -} - -type PullMetricReader struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter MetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *PullMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in PullMetricReader: required") - } - type Plain PullMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PullMetricReader(plain) - return nil -} - -type Resource struct { - // Attributes corresponds to the JSON schema field "attributes". - Attributes Attributes `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` - - // Detectors corresponds to the JSON schema field "detectors". - Detectors *Detectors `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` - - // SchemaUrl corresponds to the JSON schema field "schema_url". - SchemaUrl *string `json:"schema_url,omitempty" yaml:"schema_url,omitempty" mapstructure:"schema_url,omitempty"` -} - -type Sampler struct { - // AlwaysOff corresponds to the JSON schema field "always_off". - AlwaysOff SamplerAlwaysOff `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"` - - // AlwaysOn corresponds to the JSON schema field "always_on". - AlwaysOn SamplerAlwaysOn `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` - - // JaegerRemote corresponds to the JSON schema field "jaeger_remote". - JaegerRemote *SamplerJaegerRemote `json:"jaeger_remote,omitempty" yaml:"jaeger_remote,omitempty" mapstructure:"jaeger_remote,omitempty"` - - // ParentBased corresponds to the JSON schema field "parent_based". - ParentBased *SamplerParentBased `json:"parent_based,omitempty" yaml:"parent_based,omitempty" mapstructure:"parent_based,omitempty"` - - // TraceIDRatioBased corresponds to the JSON schema field "trace_id_ratio_based". - TraceIDRatioBased *SamplerTraceIDRatioBased `json:"trace_id_ratio_based,omitempty" yaml:"trace_id_ratio_based,omitempty" mapstructure:"trace_id_ratio_based,omitempty"` - - AdditionalProperties interface{} -} - -type SamplerAlwaysOff map[string]interface{} - -type SamplerAlwaysOn map[string]interface{} - -type SamplerJaegerRemote struct { - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // InitialSampler corresponds to the JSON schema field "initial_sampler". - InitialSampler *Sampler `json:"initial_sampler,omitempty" yaml:"initial_sampler,omitempty" mapstructure:"initial_sampler,omitempty"` - - // Interval corresponds to the JSON schema field "interval". - Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` -} - -type SamplerParentBased struct { - // LocalParentNotSampled corresponds to the JSON schema field - // "local_parent_not_sampled". 
- LocalParentNotSampled *Sampler `json:"local_parent_not_sampled,omitempty" yaml:"local_parent_not_sampled,omitempty" mapstructure:"local_parent_not_sampled,omitempty"` - - // LocalParentSampled corresponds to the JSON schema field "local_parent_sampled". - LocalParentSampled *Sampler `json:"local_parent_sampled,omitempty" yaml:"local_parent_sampled,omitempty" mapstructure:"local_parent_sampled,omitempty"` - - // RemoteParentNotSampled corresponds to the JSON schema field - // "remote_parent_not_sampled". - RemoteParentNotSampled *Sampler `json:"remote_parent_not_sampled,omitempty" yaml:"remote_parent_not_sampled,omitempty" mapstructure:"remote_parent_not_sampled,omitempty"` - - // RemoteParentSampled corresponds to the JSON schema field - // "remote_parent_sampled". - RemoteParentSampled *Sampler `json:"remote_parent_sampled,omitempty" yaml:"remote_parent_sampled,omitempty" mapstructure:"remote_parent_sampled,omitempty"` - - // Root corresponds to the JSON schema field "root". - Root *Sampler `json:"root,omitempty" yaml:"root,omitempty" mapstructure:"root,omitempty"` -} - -type SamplerTraceIDRatioBased struct { - // Ratio corresponds to the JSON schema field "ratio". - Ratio *float64 `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` -} - -type SimpleLogRecordProcessor struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required") - } - type Plain SimpleLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleLogRecordProcessor(plain) - return nil -} - -type SimpleSpanProcessor struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in SimpleSpanProcessor: required") - } - type Plain SimpleSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleSpanProcessor(plain) - return nil -} - -type SpanExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLP `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - // Zipkin corresponds to the JSON schema field "zipkin". - Zipkin *Zipkin `json:"zipkin,omitempty" yaml:"zipkin,omitempty" mapstructure:"zipkin,omitempty"` - - AdditionalProperties interface{} -} - -type SpanLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". 
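
The sampler schema above nests: parent_based wraps a root sampler plus four per-parent-state overrides, and trace_id_ratio_based carries a single ratio. The sampler-translation code of the deleted trace.go sits beyond this excerpt, so the SDK correspondence below is an assumed mapping for the common case, not the deleted code itself:

```go
package main

import (
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// YAML: tracer_provider.sampler.parent_based.root.trace_id_ratio_based.ratio: 0.25
	ratio := 0.25
	sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(ratio))

	fmt.Println(sampler.Description())
}
```
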
- AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". - AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` - - // EventAttributeCountLimit corresponds to the JSON schema field - // "event_attribute_count_limit". - EventAttributeCountLimit *int `json:"event_attribute_count_limit,omitempty" yaml:"event_attribute_count_limit,omitempty" mapstructure:"event_attribute_count_limit,omitempty"` - - // EventCountLimit corresponds to the JSON schema field "event_count_limit". - EventCountLimit *int `json:"event_count_limit,omitempty" yaml:"event_count_limit,omitempty" mapstructure:"event_count_limit,omitempty"` - - // LinkAttributeCountLimit corresponds to the JSON schema field - // "link_attribute_count_limit". - LinkAttributeCountLimit *int `json:"link_attribute_count_limit,omitempty" yaml:"link_attribute_count_limit,omitempty" mapstructure:"link_attribute_count_limit,omitempty"` - - // LinkCountLimit corresponds to the JSON schema field "link_count_limit". - LinkCountLimit *int `json:"link_count_limit,omitempty" yaml:"link_count_limit,omitempty" mapstructure:"link_count_limit,omitempty"` -} - -type SpanProcessor struct { - // Batch corresponds to the JSON schema field "batch". - Batch *BatchSpanProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` - - // Simple corresponds to the JSON schema field "simple". - Simple *SimpleSpanProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` - - AdditionalProperties interface{} -} - -type TracerProvider struct { - // Limits corresponds to the JSON schema field "limits". - Limits *SpanLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Processors corresponds to the JSON schema field "processors". - Processors []SpanProcessor `json:"processors,omitempty" yaml:"processors,omitempty" mapstructure:"processors,omitempty"` - - // Sampler corresponds to the JSON schema field "sampler". - Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"` -} - -type View struct { - // Selector corresponds to the JSON schema field "selector". - Selector *ViewSelector `json:"selector,omitempty" yaml:"selector,omitempty" mapstructure:"selector,omitempty"` - - // Stream corresponds to the JSON schema field "stream". - Stream *ViewStream `json:"stream,omitempty" yaml:"stream,omitempty" mapstructure:"stream,omitempty"` -} - -type ViewSelector struct { - // InstrumentName corresponds to the JSON schema field "instrument_name". - InstrumentName *string `json:"instrument_name,omitempty" yaml:"instrument_name,omitempty" mapstructure:"instrument_name,omitempty"` - - // InstrumentType corresponds to the JSON schema field "instrument_type". - InstrumentType *ViewSelectorInstrumentType `json:"instrument_type,omitempty" yaml:"instrument_type,omitempty" mapstructure:"instrument_type,omitempty"` - - // MeterName corresponds to the JSON schema field "meter_name". - MeterName *string `json:"meter_name,omitempty" yaml:"meter_name,omitempty" mapstructure:"meter_name,omitempty"` - - // MeterSchemaUrl corresponds to the JSON schema field "meter_schema_url". 
- MeterSchemaUrl *string `json:"meter_schema_url,omitempty" yaml:"meter_schema_url,omitempty" mapstructure:"meter_schema_url,omitempty"` - - // MeterVersion corresponds to the JSON schema field "meter_version". - MeterVersion *string `json:"meter_version,omitempty" yaml:"meter_version,omitempty" mapstructure:"meter_version,omitempty"` - - // Unit corresponds to the JSON schema field "unit". - Unit *string `json:"unit,omitempty" yaml:"unit,omitempty" mapstructure:"unit,omitempty"` -} - -type ViewSelectorInstrumentType string - -const ViewSelectorInstrumentTypeCounter ViewSelectorInstrumentType = "counter" -const ViewSelectorInstrumentTypeHistogram ViewSelectorInstrumentType = "histogram" -const ViewSelectorInstrumentTypeObservableCounter ViewSelectorInstrumentType = "observable_counter" -const ViewSelectorInstrumentTypeObservableGauge ViewSelectorInstrumentType = "observable_gauge" -const ViewSelectorInstrumentTypeObservableUpDownCounter ViewSelectorInstrumentType = "observable_up_down_counter" -const ViewSelectorInstrumentTypeUpDownCounter ViewSelectorInstrumentType = "up_down_counter" - -var enumValues_ViewSelectorInstrumentType = []interface{}{ - "counter", - "histogram", - "observable_counter", - "observable_gauge", - "observable_up_down_counter", - "up_down_counter", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_ViewSelectorInstrumentType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) - } - *j = ViewSelectorInstrumentType(v) - return nil -} - -type ViewStream struct { - // Aggregation corresponds to the JSON schema field "aggregation". - Aggregation *ViewStreamAggregation `json:"aggregation,omitempty" yaml:"aggregation,omitempty" mapstructure:"aggregation,omitempty"` - - // AttributeKeys corresponds to the JSON schema field "attribute_keys". - AttributeKeys []string `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"` - - // Description corresponds to the JSON schema field "description". - Description *string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` - - // Name corresponds to the JSON schema field "name". - Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` -} - -type ViewStreamAggregation struct { - // Base2ExponentialBucketHistogram corresponds to the JSON schema field - // "base2_exponential_bucket_histogram". - Base2ExponentialBucketHistogram *ViewStreamAggregationBase2ExponentialBucketHistogram `json:"base2_exponential_bucket_histogram,omitempty" yaml:"base2_exponential_bucket_histogram,omitempty" mapstructure:"base2_exponential_bucket_histogram,omitempty"` - - // Default corresponds to the JSON schema field "default". - Default ViewStreamAggregationDefault `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"` - - // Drop corresponds to the JSON schema field "drop". - Drop ViewStreamAggregationDrop `json:"drop,omitempty" yaml:"drop,omitempty" mapstructure:"drop,omitempty"` - - // ExplicitBucketHistogram corresponds to the JSON schema field - // "explicit_bucket_histogram". 
- ExplicitBucketHistogram *ViewStreamAggregationExplicitBucketHistogram `json:"explicit_bucket_histogram,omitempty" yaml:"explicit_bucket_histogram,omitempty" mapstructure:"explicit_bucket_histogram,omitempty"` - - // LastValue corresponds to the JSON schema field "last_value". - LastValue ViewStreamAggregationLastValue `json:"last_value,omitempty" yaml:"last_value,omitempty" mapstructure:"last_value,omitempty"` - - // Sum corresponds to the JSON schema field "sum". - Sum ViewStreamAggregationSum `json:"sum,omitempty" yaml:"sum,omitempty" mapstructure:"sum,omitempty"` -} - -type ViewStreamAggregationBase2ExponentialBucketHistogram struct { - // MaxScale corresponds to the JSON schema field "max_scale". - MaxScale *int `json:"max_scale,omitempty" yaml:"max_scale,omitempty" mapstructure:"max_scale,omitempty"` - - // MaxSize corresponds to the JSON schema field "max_size". - MaxSize *int `json:"max_size,omitempty" yaml:"max_size,omitempty" mapstructure:"max_size,omitempty"` - - // RecordMinMax corresponds to the JSON schema field "record_min_max". - RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` -} - -type ViewStreamAggregationDefault map[string]interface{} - -type ViewStreamAggregationDrop map[string]interface{} - -type ViewStreamAggregationExplicitBucketHistogram struct { - // Boundaries corresponds to the JSON schema field "boundaries". - Boundaries []float64 `json:"boundaries,omitempty" yaml:"boundaries,omitempty" mapstructure:"boundaries,omitempty"` - - // RecordMinMax corresponds to the JSON schema field "record_min_max". - RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` -} - -type ViewStreamAggregationLastValue map[string]interface{} - -type ViewStreamAggregationSum map[string]interface{} - -type Zipkin struct { - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
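
The View/ViewSelector/ViewStream types above map one-to-one onto sdkmetric views; the metric.go deleted later in this diff performs that translation. For orientation, a hand-written equivalent of a view that re-buckets a histogram (instrument name and boundaries are illustrative):

```go
package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	view := sdkmetric.NewView(
		// selector: instrument_name + instrument_type
		sdkmetric.Instrument{
			Name: "http.server.duration", // hypothetical instrument
			Kind: sdkmetric.InstrumentKindHistogram,
		},
		// stream: explicit_bucket_histogram with custom boundaries
		sdkmetric.Stream{
			Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
				Boundaries: []float64{0.01, 0.1, 1, 10},
			},
		},
	)

	_ = sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
}
```
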
-func (j *Zipkin) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in Zipkin: required") - } - type Plain Zipkin - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = Zipkin(plain) - return nil -} diff --git a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/log.go b/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/log.go deleted file mode 100644 index 829b54a56e8..00000000000 --- a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/log.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.2.0" - -import ( - "context" - "errors" - "fmt" - "net/url" - "time" - - "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" - "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/log/noop" - sdklog "go.opentelemetry.io/otel/sdk/log" - "go.opentelemetry.io/otel/sdk/resource" -) - -func loggerProvider(cfg configOptions, res *resource.Resource) (log.LoggerProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.LoggerProvider == nil { - return noop.NewLoggerProvider(), noopShutdown, nil - } - opts := []sdklog.LoggerProviderOption{ - sdklog.WithResource(res), - } - var errs []error - for _, processor := range cfg.opentelemetryConfig.LoggerProvider.Processors { - sp, err := logProcessor(cfg.ctx, processor) - if err == nil { - opts = append(opts, sdklog.WithProcessor(sp)) - } else { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return noop.NewLoggerProvider(), noopShutdown, errors.Join(errs...) - } - - lp := sdklog.NewLoggerProvider(opts...) 
- return lp, lp.Shutdown, nil -} - -func logProcessor(ctx context.Context, processor LogRecordProcessor) (sdklog.Processor, error) { - if processor.Batch != nil && processor.Simple != nil { - return nil, errors.New("must not specify multiple log processor type") - } - if processor.Batch != nil { - exp, err := logExporter(ctx, processor.Batch.Exporter) - if err != nil { - return nil, err - } - return batchLogProcessor(processor.Batch, exp) - } - if processor.Simple != nil { - exp, err := logExporter(ctx, processor.Simple.Exporter) - if err != nil { - return nil, err - } - return sdklog.NewSimpleProcessor(exp), nil - } - return nil, errors.New("unsupported log processor type, must be one of simple or batch") -} - -func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Exporter, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - - if exporter.Console != nil { - return stdoutlog.New( - stdoutlog.WithPrettyPrint(), - ) - } - - if exporter.OTLP != nil { - switch exporter.OTLP.Protocol { - case protocolProtobufHTTP: - return otlpHTTPLogExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) - } - } - return nil, errors.New("no valid log exporter") -} - -func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdklog.BatchProcessor, error) { - var opts []sdklog.BatchProcessorOption - if blp.ExportTimeout != nil { - if *blp.ExportTimeout < 0 { - return nil, fmt.Errorf("invalid export timeout %d", *blp.ExportTimeout) - } - opts = append(opts, sdklog.WithExportTimeout(time.Millisecond*time.Duration(*blp.ExportTimeout))) - } - if blp.MaxExportBatchSize != nil { - if *blp.MaxExportBatchSize < 0 { - return nil, fmt.Errorf("invalid batch size %d", *blp.MaxExportBatchSize) - } - opts = append(opts, sdklog.WithExportMaxBatchSize(*blp.MaxExportBatchSize)) - } - if blp.MaxQueueSize != nil { - if *blp.MaxQueueSize < 0 { - return nil, fmt.Errorf("invalid queue size %d", *blp.MaxQueueSize) - } - opts = append(opts, sdklog.WithMaxQueueSize(*blp.MaxQueueSize)) - } - - if blp.ScheduleDelay != nil { - if *blp.ScheduleDelay < 0 { - return nil, fmt.Errorf("invalid schedule delay %d", *blp.ScheduleDelay) - } - opts = append(opts, sdklog.WithExportInterval(time.Millisecond*time.Duration(*blp.ScheduleDelay))) - } - - return sdklog.NewBatchProcessor(exp, opts...), nil -} - -func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter, error) { - var opts []otlploghttp.Option - - if otlpConfig.Endpoint != "" { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlploghttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlploghttp.WithInsecure()) - } - if u.Path != "" { - opts = append(opts, otlploghttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlploghttp.WithCompression(otlploghttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlploghttp.WithCompression(otlploghttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, 
otlploghttp.WithHeaders(otlpConfig.Headers)) - } - - return otlploghttp.New(ctx, opts...) -} diff --git a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/metric.go b/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/metric.go deleted file mode 100644 index e0c1cadaa67..00000000000 --- a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/metric.go +++ /dev/null @@ -1,506 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.2.0" - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "math" - "net" - "net/http" - "net/url" - "os" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/sdk/instrumentation" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/metricdata" - "go.opentelemetry.io/otel/sdk/resource" -) - -var zeroScope instrumentation.Scope - -const instrumentKindUndefined = sdkmetric.InstrumentKind(0) - -func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.MeterProvider == nil { - return noop.NewMeterProvider(), noopShutdown, nil - } - opts := []sdkmetric.Option{ - sdkmetric.WithResource(res), - } - - var errs []error - for _, reader := range cfg.opentelemetryConfig.MeterProvider.Readers { - r, err := metricReader(cfg.ctx, reader) - if err == nil { - opts = append(opts, sdkmetric.WithReader(r)) - } else { - errs = append(errs, err) - } - } - for _, vw := range cfg.opentelemetryConfig.MeterProvider.Views { - v, err := view(vw) - if err == nil { - opts = append(opts, sdkmetric.WithView(v)) - } else { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return noop.NewMeterProvider(), noopShutdown, errors.Join(errs...) - } - - mp := sdkmetric.NewMeterProvider(opts...) - return mp, mp.Shutdown, nil -} - -func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) { - if r.Periodic != nil && r.Pull != nil { - return nil, errors.New("must not specify multiple metric reader type") - } - - if r.Periodic != nil { - var opts []sdkmetric.PeriodicReaderOption - if r.Periodic.Interval != nil { - opts = append(opts, sdkmetric.WithInterval(time.Duration(*r.Periodic.Interval)*time.Millisecond)) - } - - if r.Periodic.Timeout != nil { - opts = append(opts, sdkmetric.WithTimeout(time.Duration(*r.Periodic.Timeout)*time.Millisecond)) - } - return periodicExporter(ctx, r.Periodic.Exporter, opts...) 
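
Both the log and metric HTTP exporter builders in this diff derive three options from a single endpoint URL: the host for WithEndpoint, an optional path for WithURLPath, and WithInsecure when the scheme is plain http. That decomposition can be verified with the standard library alone:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.ParseRequestURI("http://localhost:4318/v1/logs")
	if err != nil {
		panic(err)
	}
	// The deleted builders pass these to WithEndpoint / WithURLPath,
	// and add WithInsecure because the scheme is "http".
	fmt.Println(u.Host)   // localhost:4318
	fmt.Println(u.Path)   // /v1/logs
	fmt.Println(u.Scheme) // http
}
```
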
- } - - if r.Pull != nil { - return pullReader(ctx, r.Pull.Exporter) - } - return nil, errors.New("no valid metric reader") -} - -func pullReader(ctx context.Context, exporter MetricExporter) (sdkmetric.Reader, error) { - if exporter.Prometheus != nil { - return prometheusReader(ctx, exporter.Prometheus) - } - return nil, errors.New("no valid metric exporter") -} - -func periodicExporter(ctx context.Context, exporter MetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - if exporter.Console != nil { - enc := json.NewEncoder(os.Stdout) - enc.SetIndent("", " ") - - exp, err := stdoutmetric.New( - stdoutmetric.WithEncoder(enc), - ) - if err != nil { - return nil, err - } - return sdkmetric.NewPeriodicReader(exp, opts...), nil - } - if exporter.OTLP != nil { - var err error - var exp sdkmetric.Exporter - switch exporter.OTLP.Protocol { - case protocolProtobufHTTP: - exp, err = otlpHTTPMetricExporter(ctx, exporter.OTLP) - case protocolProtobufGRPC: - exp, err = otlpGRPCMetricExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) - } - if err != nil { - return nil, err - } - return sdkmetric.NewPeriodicReader(exp, opts...), nil - } - return nil, errors.New("no valid metric exporter") -} - -func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { - opts := []otlpmetrichttp.Option{} - - if otlpConfig.Endpoint != "" { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlpmetrichttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlpmetrichttp.WithInsecure()) - } - if u.Path != "" { - opts = append(opts, otlpmetrichttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil { - opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlpmetrichttp.WithHeaders(otlpConfig.Headers)) - } - if otlpConfig.TemporalityPreference != nil { - switch *otlpConfig.TemporalityPreference { - case "delta": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(deltaTemporality)) - case "cumulative": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(cumulativeTemporality)) - case "lowmemory": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(lowMemory)) - default: - return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) - } - } - - return otlpmetrichttp.New(ctx, opts...) -} - -func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { - var opts []otlpmetricgrpc.Option - - if otlpConfig.Endpoint != "" { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - // ParseRequestURI leaves the Host field empty when no - // scheme is specified (i.e. localhost:4317). 
This check is - // here to support the case where a user may not specify a - // scheme. The code does its best effort here by using - // otlpConfig.Endpoint as-is in that case - if u.Host != "" { - opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) - } else { - opts = append(opts, otlpmetricgrpc.WithEndpoint(otlpConfig.Endpoint)) - } - if u.Scheme == "http" { - opts = append(opts, otlpmetricgrpc.WithInsecure()) - } - } - - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlpmetricgrpc.WithCompressor(*otlpConfig.Compression)) - case compressionNone: - // none requires no options - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlpmetricgrpc.WithHeaders(otlpConfig.Headers)) - } - if otlpConfig.TemporalityPreference != nil { - switch *otlpConfig.TemporalityPreference { - case "delta": - opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(deltaTemporality)) - case "cumulative": - opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(cumulativeTemporality)) - case "lowmemory": - opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(lowMemory)) - default: - return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) - } - } - - return otlpmetricgrpc.New(ctx, opts...) -} - -func cumulativeTemporality(sdkmetric.InstrumentKind) metricdata.Temporality { - return metricdata.CumulativeTemporality -} - -func deltaTemporality(ik sdkmetric.InstrumentKind) metricdata.Temporality { - switch ik { - case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram, sdkmetric.InstrumentKindObservableCounter: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -func lowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { - switch ik { - case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmetric.Reader, error) { - var opts []otelprom.Option - if prometheusConfig.Host == nil { - return nil, errors.New("host must be specified") - } - if prometheusConfig.Port == nil { - return nil, errors.New("port must be specified") - } - if prometheusConfig.WithoutScopeInfo != nil && *prometheusConfig.WithoutScopeInfo { - opts = append(opts, otelprom.WithoutScopeInfo()) - } - if prometheusConfig.WithoutTypeSuffix != nil && *prometheusConfig.WithoutTypeSuffix { - opts = append(opts, otelprom.WithoutCounterSuffixes()) //nolint:staticcheck // WithoutCounterSuffixes is deprecated, but we still need it for backwards compatibility. - } - if prometheusConfig.WithoutUnits != nil && *prometheusConfig.WithoutUnits { - opts = append(opts, otelprom.WithoutUnits()) //nolint:staticcheck // WithouTypeSuffix is deprecated, but we still need it for backwards compatibility. 
- } - if prometheusConfig.WithResourceConstantLabels != nil { - if prometheusConfig.WithResourceConstantLabels.Included != nil { - var keys []attribute.Key - for _, val := range prometheusConfig.WithResourceConstantLabels.Included { - keys = append(keys, attribute.Key(val)) - } - otelprom.WithResourceAsConstantLabels(attribute.NewAllowKeysFilter(keys...)) - } - if prometheusConfig.WithResourceConstantLabels.Excluded != nil { - var keys []attribute.Key - for _, val := range prometheusConfig.WithResourceConstantLabels.Included { - keys = append(keys, attribute.Key(val)) - } - otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter(keys...)) - } - } - - reg := prometheus.NewRegistry() - opts = append(opts, otelprom.WithRegisterer(reg)) - - reader, err := otelprom.New(opts...) - if err != nil { - return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) - } - - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) - server := http.Server{ - // Timeouts are necessary to make a server resilient to attacks, but ListenAndServe doesn't set any. - // We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts - ReadTimeout: 5 * time.Second, - WriteTimeout: 10 * time.Second, - IdleTimeout: 120 * time.Second, - Handler: mux, - } - - // Remove surrounding "[]" from the host definition to allow users to define the host as "[::1]" or "::1". - host := *prometheusConfig.Host - if len(host) > 2 && host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - - addr := net.JoinHostPort(host, strconv.Itoa(*prometheusConfig.Port)) - lis, err := net.Listen("tcp", addr) - if err != nil { - return nil, errors.Join( - fmt.Errorf("binding address %s for Prometheus exporter: %w", addr, err), - reader.Shutdown(ctx), - ) - } - - // Only for testing reasons, add the address to the http Server, will not be used. 
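
prometheusReader above accepts the host both as "::1" and as "[::1]": it strips surrounding brackets before calling net.JoinHostPort, which re-adds them whenever the host contains a colon. A quick demonstration of why the trim is needed:

```go
package main

import (
	"fmt"
	"net"
)

// trimBrackets mirrors the host normalization in prometheusReader above.
func trimBrackets(host string) string {
	if len(host) > 2 && host[0] == '[' && host[len(host)-1] == ']' {
		return host[1 : len(host)-1]
	}
	return host
}

func main() {
	for _, host := range []string{"::1", "[::1]", "0.0.0.0"} {
		fmt.Println(net.JoinHostPort(trimBrackets(host), "9090"))
	}
	// [::1]:9090
	// [::1]:9090
	// 0.0.0.0:9090
	// Without the trim, "[::1]" would come out as "[[::1]]:9090".
}
```
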
- server.Addr = lis.Addr().String() - - go func() { - if err := server.Serve(lis); err != nil && errors.Is(err, http.ErrServerClosed) { - otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err)) - } - }() - - return readerWithServer{reader, &server}, nil -} - -type readerWithServer struct { - sdkmetric.Reader - server *http.Server -} - -func (rws readerWithServer) Shutdown(ctx context.Context) error { - return errors.Join( - rws.Reader.Shutdown(ctx), - rws.server.Shutdown(ctx), - ) -} - -func view(v View) (sdkmetric.View, error) { - if v.Selector == nil { - return nil, errors.New("view: no selector provided") - } - - inst, err := instrument(*v.Selector) - if err != nil { - return nil, err - } - - return sdkmetric.NewView(inst, stream(v.Stream)), nil -} - -func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { - kind, err := instrumentKind(vs.InstrumentType) - if err != nil { - return sdkmetric.Instrument{}, fmt.Errorf("view_selector: %w", err) - } - inst := sdkmetric.Instrument{ - Name: strOrEmpty(vs.InstrumentName), - Unit: strOrEmpty(vs.Unit), - Kind: kind, - Scope: instrumentation.Scope{ - Name: strOrEmpty(vs.MeterName), - Version: strOrEmpty(vs.MeterVersion), - SchemaURL: strOrEmpty(vs.MeterSchemaUrl), - }, - } - - if instrumentIsEmpty(inst) { - return sdkmetric.Instrument{}, errors.New("view_selector: empty selector not supporter") - } - return inst, nil -} - -func stream(vs *ViewStream) sdkmetric.Stream { - if vs == nil { - return sdkmetric.Stream{} - } - - return sdkmetric.Stream{ - Name: strOrEmpty(vs.Name), - Description: strOrEmpty(vs.Description), - Aggregation: aggregation(vs.Aggregation), - AttributeFilter: attributeFilter(vs.AttributeKeys), - } -} - -func attributeFilter(attributeKeys []string) attribute.Filter { - var attrKeys []attribute.Key - for _, attrStr := range attributeKeys { - attrKeys = append(attrKeys, attribute.Key(attrStr)) - } - return attribute.NewAllowKeysFilter(attrKeys...) -} - -func aggregation(aggr *ViewStreamAggregation) sdkmetric.Aggregation { - if aggr == nil { - return nil - } - - if aggr.Base2ExponentialBucketHistogram != nil { - return sdkmetric.AggregationBase2ExponentialHistogram{ - MaxSize: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxSize), - MaxScale: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxScale), - // Need to negate because config has the positive action RecordMinMax. - NoMinMax: !boolOrFalse(aggr.Base2ExponentialBucketHistogram.RecordMinMax), - } - } - if aggr.Default != nil { - // TODO: Understand what to set here. - return nil - } - if aggr.Drop != nil { - return sdkmetric.AggregationDrop{} - } - if aggr.ExplicitBucketHistogram != nil { - return sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: aggr.ExplicitBucketHistogram.Boundaries, - // Need to negate because config has the positive action RecordMinMax. - NoMinMax: !boolOrFalse(aggr.ExplicitBucketHistogram.RecordMinMax), - } - } - if aggr.LastValue != nil { - return sdkmetric.AggregationLastValue{} - } - if aggr.Sum != nil { - return sdkmetric.AggregationSum{} - } - return nil -} - -func instrumentKind(vsit *ViewSelectorInstrumentType) (sdkmetric.InstrumentKind, error) { - if vsit == nil { - // Equivalent to instrumentKindUndefined. 
- return instrumentKindUndefined, nil - } - - switch *vsit { - case ViewSelectorInstrumentTypeCounter: - return sdkmetric.InstrumentKindCounter, nil - case ViewSelectorInstrumentTypeUpDownCounter: - return sdkmetric.InstrumentKindUpDownCounter, nil - case ViewSelectorInstrumentTypeHistogram: - return sdkmetric.InstrumentKindHistogram, nil - case ViewSelectorInstrumentTypeObservableCounter: - return sdkmetric.InstrumentKindObservableCounter, nil - case ViewSelectorInstrumentTypeObservableUpDownCounter: - return sdkmetric.InstrumentKindObservableUpDownCounter, nil - case ViewSelectorInstrumentTypeObservableGauge: - return sdkmetric.InstrumentKindObservableGauge, nil - } - - return instrumentKindUndefined, errors.New("instrument_type: invalid value") -} - -func instrumentIsEmpty(i sdkmetric.Instrument) bool { - return i.Name == "" && - i.Description == "" && - i.Kind == instrumentKindUndefined && - i.Unit == "" && - i.Scope == zeroScope -} - -func boolOrFalse(pBool *bool) bool { - if pBool == nil { - return false - } - return *pBool -} - -func int32OrZero(pInt *int) int32 { - if pInt == nil { - return 0 - } - i := *pInt - if i > math.MaxInt32 { - return math.MaxInt32 - } - if i < math.MinInt32 { - return math.MinInt32 - } - return int32(i) -} - -func strOrEmpty(pStr *string) string { - if pStr == nil { - return "" - } - return *pStr -} diff --git a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/resource.go b/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/resource.go deleted file mode 100644 index 84f3e30dab5..00000000000 --- a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/resource.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.2.0" - -import ( - "fmt" - "strconv" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" -) - -func keyVal(k string, v any) attribute.KeyValue { - switch val := v.(type) { - case bool: - return attribute.Bool(k, val) - case int64: - return attribute.Int64(k, val) - case uint64: - return attribute.String(k, strconv.FormatUint(val, 10)) - case float64: - return attribute.Float64(k, val) - case int8: - return attribute.Int64(k, int64(val)) - case uint8: - return attribute.Int64(k, int64(val)) - case int16: - return attribute.Int64(k, int64(val)) - case uint16: - return attribute.Int64(k, int64(val)) - case int32: - return attribute.Int64(k, int64(val)) - case uint32: - return attribute.Int64(k, int64(val)) - case float32: - return attribute.Float64(k, float64(val)) - case int: - return attribute.Int(k, val) - case uint: - return attribute.String(k, strconv.FormatUint(uint64(val), 10)) - case string: - return attribute.String(k, val) - default: - return attribute.String(k, fmt.Sprint(v)) - } -} - -func newResource(res *Resource) (*resource.Resource, error) { - if res == nil || res.Attributes == nil { - return resource.Default(), nil - } - var attrs []attribute.KeyValue - - for k, v := range res.Attributes { - attrs = append(attrs, keyVal(k, v)) - } - - return resource.Merge(resource.Default(), - resource.NewWithAttributes(*res.SchemaUrl, - attrs..., - )) -} diff --git a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/trace.go b/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/trace.go deleted file mode 100644 index 2de0e609e20..00000000000 --- a/vendor/go.opentelemetry.io/contrib/otelconf/v0.2.0/trace.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright The OpenTelemetry Authors -// 
SPDX-License-Identifier: Apache-2.0 - -package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.2.0" - -import ( - "context" - "errors" - "fmt" - "net/url" - "time" - - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" - "go.opentelemetry.io/otel/sdk/resource" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" -) - -func tracerProvider(cfg configOptions, res *resource.Resource) (trace.TracerProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.TracerProvider == nil { - return noop.NewTracerProvider(), noopShutdown, nil - } - opts := []sdktrace.TracerProviderOption{ - sdktrace.WithResource(res), - } - var errs []error - for _, processor := range cfg.opentelemetryConfig.TracerProvider.Processors { - sp, err := spanProcessor(cfg.ctx, processor) - if err == nil { - opts = append(opts, sdktrace.WithSpanProcessor(sp)) - } else { - errs = append(errs, err) - } - } - if len(errs) > 0 { - return noop.NewTracerProvider(), noopShutdown, errors.Join(errs...) - } - tp := sdktrace.NewTracerProvider(opts...) - return tp, tp.Shutdown, nil -} - -func spanExporter(ctx context.Context, exporter SpanExporter) (sdktrace.SpanExporter, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - - if exporter.Console != nil { - return stdouttrace.New( - stdouttrace.WithPrettyPrint(), - ) - } - if exporter.OTLP != nil { - switch exporter.OTLP.Protocol { - case protocolProtobufHTTP: - return otlpHTTPSpanExporter(ctx, exporter.OTLP) - case protocolProtobufGRPC: - return otlpGRPCSpanExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) - } - } - return nil, errors.New("no valid span exporter") -} - -func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanProcessor, error) { - if processor.Batch != nil && processor.Simple != nil { - return nil, errors.New("must not specify multiple span processor type") - } - if processor.Batch != nil { - exp, err := spanExporter(ctx, processor.Batch.Exporter) - if err != nil { - return nil, err - } - return batchSpanProcessor(processor.Batch, exp) - } - if processor.Simple != nil { - exp, err := spanExporter(ctx, processor.Simple.Exporter) - if err != nil { - return nil, err - } - return sdktrace.NewSimpleSpanProcessor(exp), nil - } - return nil, errors.New("unsupported span processor type, must be one of simple or batch") -} - -func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { - var opts []otlptracegrpc.Option - - if otlpConfig.Endpoint != "" { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - // ParseRequestURI leaves the Host field empty when no - // scheme is specified (i.e. localhost:4317). This check is - // here to support the case where a user may not specify a - // scheme. The code does its best effort here by using - // otlpConfig.Endpoint as-is in that case. 
- if u.Host != "" { - opts = append(opts, otlptracegrpc.WithEndpoint(u.Host)) - } else { - opts = append(opts, otlptracegrpc.WithEndpoint(otlpConfig.Endpoint)) - } - - if u.Scheme == "http" { - opts = append(opts, otlptracegrpc.WithInsecure()) - } - } - - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlptracegrpc.WithCompressor(*otlpConfig.Compression)) - case compressionNone: - // none requires no options - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlptracegrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlptracegrpc.WithHeaders(otlpConfig.Headers)) - } - - return otlptracegrpc.New(ctx, opts...) -} - -func otlpHTTPSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { - var opts []otlptracehttp.Option - - if otlpConfig.Endpoint != "" { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlptracehttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlptracehttp.WithInsecure()) - } - if u.Path != "" { - opts = append(opts, otlptracehttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlptracehttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlptracehttp.WithHeaders(otlpConfig.Headers)) - } - - return otlptracehttp.New(ctx, opts...) 
-} - -func batchSpanProcessor(bsp *BatchSpanProcessor, exp sdktrace.SpanExporter) (sdktrace.SpanProcessor, error) { - var opts []sdktrace.BatchSpanProcessorOption - if bsp.ExportTimeout != nil { - if *bsp.ExportTimeout < 0 { - return nil, fmt.Errorf("invalid export timeout %d", *bsp.ExportTimeout) - } - opts = append(opts, sdktrace.WithExportTimeout(time.Millisecond*time.Duration(*bsp.ExportTimeout))) - } - if bsp.MaxExportBatchSize != nil { - if *bsp.MaxExportBatchSize < 0 { - return nil, fmt.Errorf("invalid batch size %d", *bsp.MaxExportBatchSize) - } - opts = append(opts, sdktrace.WithMaxExportBatchSize(*bsp.MaxExportBatchSize)) - } - if bsp.MaxQueueSize != nil { - if *bsp.MaxQueueSize < 0 { - return nil, fmt.Errorf("invalid queue size %d", *bsp.MaxQueueSize) - } - opts = append(opts, sdktrace.WithMaxQueueSize(*bsp.MaxQueueSize)) - } - if bsp.ScheduleDelay != nil { - if *bsp.ScheduleDelay < 0 { - return nil, fmt.Errorf("invalid schedule delay %d", *bsp.ScheduleDelay) - } - opts = append(opts, sdktrace.WithBatchTimeout(time.Millisecond*time.Duration(*bsp.ScheduleDelay))) - } - return sdktrace.NewBatchSpanProcessor(exp, opts...), nil -} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/LICENSE b/vendor/go.opentelemetry.io/contrib/propagators/b3/LICENSE deleted file mode 100644 index f1aee0f1100..00000000000 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/LICENSE +++ /dev/null @@ -1,231 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- - -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_config.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_config.go deleted file mode 100644 index 15a5f7715d3..00000000000 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_config.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package b3 // import "go.opentelemetry.io/contrib/propagators/b3" - -type config struct { - // InjectEncoding are the B3 encodings used when injecting trace - // information. If no encoding is specified (i.e. `B3Unspecified`) - // `B3SingleHeader` will be used as the default. - InjectEncoding Encoding -} - -// Option interface used for setting optional config properties. -type Option interface { - apply(*config) -} - -type optionFunc func(*config) - -func (o optionFunc) apply(c *config) { - o(c) -} - -// newConfig creates a new config struct and applies opts to it. -func newConfig(opts ...Option) *config { - c := &config{} - for _, opt := range opts { - opt.apply(c) - } - return c -} - -// Encoding is a bitmask representation of the B3 encoding type. -type Encoding uint8 - -// supports returns if e has o bit(s) set. -func (e Encoding) supports(o Encoding) bool { - return e&o == o -} - -const ( - // B3Unspecified is an unspecified B3 encoding. - B3Unspecified Encoding = 0 - // B3MultipleHeader is a B3 encoding that uses multiple headers to - // transmit tracing information all prefixed with `x-b3-`. - // x-b3-traceid: {TraceId} - // x-b3-parentspanid: {ParentSpanId} - // x-b3-spanid: {SpanId} - // x-b3-sampled: {SamplingState} - // x-b3-flags: {DebugFlag} - B3MultipleHeader Encoding = 1 << iota - // B3SingleHeader is a B3 encoding that uses a single header named `b3` - // to transmit tracing information. - // b3: {TraceId}-{SpanId}-{SamplingState}-{ParentSpanId} - B3SingleHeader -) - -// WithInjectEncoding sets the encoding the propagator will inject. -// The encoding is interpreted as a bitmask. Therefore -// -// WithInjectEncoding(B3SingleHeader | B3MultipleHeader) -// -// means the propagator will inject both single and multi B3 headers. 
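Aside: the `Encoding` type defined in the deleted b3_config.go above is a bitmask, so the two header formats compose with bitwise OR. A minimal usage sketch, not part of this diff, using only the public `b3` and `otel` APIs shown here:

```go
package main

import (
	"go.opentelemetry.io/contrib/propagators/b3"
	"go.opentelemetry.io/otel"
)

func main() {
	// Encoding flags are bits: OR them to inject both the single "b3"
	// header and the multiple "x-b3-*" headers on every outgoing request.
	p := b3.New(b3.WithInjectEncoding(b3.B3SingleHeader | b3.B3MultipleHeader))
	otel.SetTextMapPropagator(p)
}
```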
-func WithInjectEncoding(encoding Encoding) Option { - return optionFunc(func(c *config) { - c.InjectEncoding = encoding - }) -} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go deleted file mode 100644 index 725d9eff916..00000000000 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package b3 // import "go.opentelemetry.io/contrib/propagators/b3" - -import ( - "context" - "errors" - "strings" - - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" -) - -const ( - // Default B3 Header names. - b3ContextHeader = "b3" - b3DebugFlagHeader = "x-b3-flags" - b3TraceIDHeader = "x-b3-traceid" - b3SpanIDHeader = "x-b3-spanid" - b3SampledHeader = "x-b3-sampled" - b3ParentSpanIDHeader = "x-b3-parentspanid" - - b3TraceIDPadding = "0000000000000000" - - // B3 Single Header encoding widths. - separatorWidth = 1 // Single "-" character. - samplingWidth = 1 // Single hex character. - traceID64BitsWidth = 64 / 4 // 16 hex character Trace ID. - traceID128BitsWidth = 128 / 4 // 32 hex character Trace ID. - spanIDWidth = 16 // 16 hex character ID. - parentSpanIDWidth = 16 // 16 hex character ID. -) - -var ( - empty = trace.SpanContext{} - - errInvalidSampledByte = errors.New("invalid B3 Sampled found") - errInvalidSampledHeader = errors.New("invalid B3 Sampled header found") - errInvalidTraceIDHeader = errors.New("invalid B3 traceID header found") - errInvalidSpanIDHeader = errors.New("invalid B3 spanID header found") - errInvalidParentSpanIDHeader = errors.New("invalid B3 ParentSpanID header found") - errInvalidScope = errors.New("require either both traceID and spanID or none") - errInvalidScopeParent = errors.New("traceID and spanID required for ParentSpanID") - errInvalidScopeParentSingle = errors.New("traceID, spanID and Sampled required for ParentSpanID") - errEmptyContext = errors.New("empty request context") - errInvalidTraceIDValue = errors.New("invalid B3 traceID value found") - errInvalidSpanIDValue = errors.New("invalid B3 spanID value found") - errInvalidParentSpanIDValue = errors.New("invalid B3 ParentSpanID value found") -) - -type propagator struct { - cfg config -} - -var _ propagation.TextMapPropagator = propagator{} - -// New creates a B3 implementation of propagation.TextMapPropagator. -// B3 propagator serializes SpanContext to/from B3 Headers. -// This propagator supports both versions of B3 headers, -// 1. Single Header: -// b3: {TraceId}-{SpanId}-{SamplingState}-{ParentSpanId} -// 2. Multiple Headers: -// x-b3-traceid: {TraceId} -// x-b3-parentspanid: {ParentSpanId} -// x-b3-spanid: {SpanId} -// x-b3-sampled: {SamplingState} -// x-b3-flags: {DebugFlag} -// -// The Single Header propagator is used by default. -func New(opts ...Option) propagation.TextMapPropagator { - cfg := newConfig(opts...) - return propagator{ - cfg: *cfg, - } -} - -// Inject injects a context into the carrier as B3 headers. -// The parent span ID is omitted because it is not tracked in the -// SpanContext. 
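For reference, with the default single-header encoding `Inject` writes one `b3` header of the form `{TraceId}-{SpanId}-{SamplingState}`. An illustrative sketch, not part of this diff (the IDs are fabricated for the example):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/contrib/propagators/b3"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Build a sampled span context with fabricated IDs.
	tid, _ := trace.TraceIDFromHex("80f198ee56343ba864fe8b2a57d3eff7")
	sid, _ := trace.SpanIDFromHex("e457b5a2e4d86bd1")
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sc)

	carrier := propagation.MapCarrier{}
	b3.New().Inject(ctx, carrier)
	// Prints: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-1
	fmt.Println(carrier.Get("b3"))
}
```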
-func (b3 propagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
-	sc := trace.SpanFromContext(ctx).SpanContext()
-
-	if b3.cfg.InjectEncoding.supports(B3SingleHeader) || b3.cfg.InjectEncoding == B3Unspecified {
-		header := []string{}
-		if sc.TraceID().IsValid() && sc.SpanID().IsValid() {
-			header = append(header, sc.TraceID().String(), sc.SpanID().String())
-		}
-
-		if debugFromContext(ctx) {
-			header = append(header, "d")
-		} else if !(deferredFromContext(ctx)) {
-			if sc.IsSampled() {
-				header = append(header, "1")
-			} else {
-				header = append(header, "0")
-			}
-		}
-
-		carrier.Set(b3ContextHeader, strings.Join(header, "-"))
-	}
-
-	if b3.cfg.InjectEncoding.supports(B3MultipleHeader) {
-		if sc.TraceID().IsValid() && sc.SpanID().IsValid() {
-			carrier.Set(b3TraceIDHeader, sc.TraceID().String())
-			carrier.Set(b3SpanIDHeader, sc.SpanID().String())
-		}
-
-		if debugFromContext(ctx) {
-			// Since Debug implies deferred, don't also send "X-B3-Sampled".
-			carrier.Set(b3DebugFlagHeader, "1")
-		} else if !(deferredFromContext(ctx)) {
-			if sc.IsSampled() {
-				carrier.Set(b3SampledHeader, "1")
-			} else {
-				carrier.Set(b3SampledHeader, "0")
-			}
-		}
-	}
-}
-
-// Extract extracts a context from the carrier if it contains B3 headers.
-func (propagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
-	var (
-		sc  trace.SpanContext
-		err error
-	)
-
-	// Default to Single Header if a valid value exists.
-	if h := carrier.Get(b3ContextHeader); h != "" {
-		ctx, sc, err = extractSingle(ctx, h)
-		if err == nil && sc.IsValid() {
-			return trace.ContextWithRemoteSpanContext(ctx, sc)
-		}
-		// The Single Header value was invalid, fallback to Multiple Header.
-	}
-
-	var (
-		traceID      = carrier.Get(b3TraceIDHeader)
-		spanID       = carrier.Get(b3SpanIDHeader)
-		parentSpanID = carrier.Get(b3ParentSpanIDHeader)
-		sampled      = carrier.Get(b3SampledHeader)
-		debugFlag    = carrier.Get(b3DebugFlagHeader)
-	)
-	ctx, sc, err = extractMultiple(ctx, traceID, spanID, parentSpanID, sampled, debugFlag)
-	if err != nil || !sc.IsValid() {
-		// clear the deferred flag if we don't have a valid SpanContext
-		return withDeferred(ctx, false)
-	}
-	return trace.ContextWithRemoteSpanContext(ctx, sc)
-}
-
-func (b3 propagator) Fields() []string {
-	header := []string{}
-	if b3.cfg.InjectEncoding.supports(B3SingleHeader) || b3.cfg.InjectEncoding == B3Unspecified {
-		header = append(header, b3ContextHeader)
-	}
-	if b3.cfg.InjectEncoding.supports(B3MultipleHeader) {
-		header = append(header, b3TraceIDHeader, b3SpanIDHeader, b3SampledHeader, b3DebugFlagHeader)
-	}
-	return header
-}
-
-// extractMultiple reconstructs a SpanContext from header values based on B3
-// Multiple header. It is based on the implementation found here:
-// https://github.com/openzipkin/zipkin-go/blob/v0.2.2/propagation/b3/spancontext.go
-// and adapted to support a SpanContext.
-func extractMultiple(ctx context.Context, traceID, spanID, parentSpanID, sampled, flags string) (context.Context, trace.SpanContext, error) {
-	var (
-		err           error
-		requiredCount int
-		scc           = trace.SpanContextConfig{}
-	)
-
-	// correct values for an existing sampled header are "0" and "1".
-	// For legacy support and being lenient to other tracing implementations we
-	// allow "true" and "false" as inputs for interop purposes.
-	switch strings.ToLower(sampled) {
-	case "0", "false":
-		// Zero value for TraceFlags sample bit is unset.
- case "1", "true": - scc.TraceFlags = trace.FlagsSampled - case "": - ctx = withDeferred(ctx, true) - default: - return ctx, empty, errInvalidSampledHeader - } - - // The only accepted value for Flags is "1". This will set Debug bitmask and - // sampled bitmask to 1 since debug implicitly means sampled. All other - // values and omission of header will be ignored. According to the spec. User - // shouldn't send X-B3-Sampled header along with X-B3-Flags header. Thus we will - // ignore X-B3-Sampled header when X-B3-Flags header is sent and valid. - if flags == "1" { - ctx = withDeferred(ctx, false) - ctx = withDebug(ctx, true) - scc.TraceFlags |= trace.FlagsSampled - } - - if traceID != "" { - requiredCount++ - id := traceID - if len(traceID) == 16 { - // Pad 64-bit trace IDs. - id = b3TraceIDPadding + traceID - } - if scc.TraceID, err = trace.TraceIDFromHex(id); err != nil { - return ctx, empty, errInvalidTraceIDHeader - } - } - - if spanID != "" { - requiredCount++ - if scc.SpanID, err = trace.SpanIDFromHex(spanID); err != nil { - return ctx, empty, errInvalidSpanIDHeader - } - } - - if requiredCount != 0 && requiredCount != 2 { - return ctx, empty, errInvalidScope - } - - if parentSpanID != "" { - if requiredCount == 0 { - return ctx, empty, errInvalidScopeParent - } - // Validate parent span ID but we do not use it so do not save it. - if _, err = trace.SpanIDFromHex(parentSpanID); err != nil { - return ctx, empty, errInvalidParentSpanIDHeader - } - } - - return ctx, trace.NewSpanContext(scc), nil -} - -// extractSingle reconstructs a SpanContext from contextHeader based on a B3 -// Single header. It is based on the implementation found here: -// https://github.com/openzipkin/zipkin-go/blob/v0.2.2/propagation/b3/spancontext.go -// and adapted to support a SpanContext. -func extractSingle(ctx context.Context, contextHeader string) (context.Context, trace.SpanContext, error) { - if contextHeader == "" { - return ctx, empty, errEmptyContext - } - - var ( - scc = trace.SpanContextConfig{} - sampling string - ) - - headerLen := len(contextHeader) - - switch { - case headerLen == samplingWidth: - sampling = contextHeader - case headerLen == traceID64BitsWidth || headerLen == traceID128BitsWidth: - // Trace ID by itself is invalid. - return ctx, empty, errInvalidScope - case headerLen >= traceID64BitsWidth+spanIDWidth+separatorWidth: - pos := 0 - var traceID string - switch { - case string(contextHeader[traceID64BitsWidth]) == "-": - // traceID must be 64 bits - pos += traceID64BitsWidth // {traceID} - traceID = b3TraceIDPadding + contextHeader[0:pos] - case string(contextHeader[32]) == "-": - // traceID must be 128 bits - pos += traceID128BitsWidth // {traceID} - traceID = contextHeader[0:pos] - default: - return ctx, empty, errInvalidTraceIDValue - } - var err error - scc.TraceID, err = trace.TraceIDFromHex(traceID) - if err != nil { - return ctx, empty, errInvalidTraceIDValue - } - pos += separatorWidth // {traceID}- - - if headerLen < pos+spanIDWidth { - return ctx, empty, errInvalidSpanIDValue - } - scc.SpanID, err = trace.SpanIDFromHex(contextHeader[pos : pos+spanIDWidth]) - if err != nil { - return ctx, empty, errInvalidSpanIDValue - } - pos += spanIDWidth // {traceID}-{spanID} - - if headerLen > pos { - if headerLen == pos+separatorWidth { - // {traceID}-{spanID}- is invalid. 
- return ctx, empty, errInvalidSampledByte - } - pos += separatorWidth // {traceID}-{spanID}- - - switch headerLen { - case pos + samplingWidth: - sampling = string(contextHeader[pos]) - case pos + parentSpanIDWidth: - // {traceID}-{spanID}-{parentSpanID} is invalid. - return ctx, empty, errInvalidScopeParentSingle - case pos + samplingWidth + separatorWidth + parentSpanIDWidth: - sampling = string(contextHeader[pos]) - pos += samplingWidth + separatorWidth // {traceID}-{spanID}-{sampling}- - - // Validate parent span ID but we do not use it so do not - // save it. - _, err = trace.SpanIDFromHex(contextHeader[pos:]) - if err != nil { - return ctx, empty, errInvalidParentSpanIDValue - } - default: - return ctx, empty, errInvalidParentSpanIDValue - } - } - default: - return ctx, empty, errInvalidTraceIDValue - } - switch sampling { - case "": - ctx = withDeferred(ctx, true) - case "d": - ctx = withDebug(ctx, true) - scc.TraceFlags = trace.FlagsSampled - case "1": - scc.TraceFlags = trace.FlagsSampled - case "0": - // Zero value for TraceFlags sample bit is unset. - default: - return ctx, empty, errInvalidSampledByte - } - - return ctx, trace.NewSpanContext(scc), nil -} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/context.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/context.go deleted file mode 100644 index 7d2ae0d703f..00000000000 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/context.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package b3 // import "go.opentelemetry.io/contrib/propagators/b3" - -import "context" - -type b3KeyType int - -const ( - debugKey b3KeyType = iota - deferredKey -) - -// withDebug returns a copy of parent with debug set as the debug flag value . -func withDebug(parent context.Context, debug bool) context.Context { - return context.WithValue(parent, debugKey, debug) -} - -// debugFromContext returns the debug value stored in ctx. -// -// If no debug value is stored in ctx false is returned. -func debugFromContext(ctx context.Context) bool { - if ctx == nil { - return false - } - if debug, ok := ctx.Value(debugKey).(bool); ok { - return debug - } - return false -} - -// withDeferred returns a copy of parent with deferred set as the deferred flag value . -func withDeferred(parent context.Context, deferred bool) context.Context { - return context.WithValue(parent, deferredKey, deferred) -} - -// deferredFromContext returns the deferred value stored in ctx. -// -// If no deferred value is stored in ctx false is returned. 
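These context flags exist because "deferred" (no sampling decision yet) and "debug" cannot be represented in `trace.TraceFlags`; carrying them on the context lets a later `Inject` reproduce the incoming header faithfully. An illustrative round-trip, not part of this diff (IDs fabricated):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/contrib/propagators/b3"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	p := b3.New()

	// A single "b3" header with no sampling component: the decision is deferred.
	in := propagation.MapCarrier{"b3": "80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1"}
	ctx := p.Extract(context.Background(), in)

	// On re-injection, the deferred flag (carried on the context, not in
	// TraceFlags) suppresses the sampling component again.
	out := propagation.MapCarrier{}
	p.Inject(ctx, out)
	// Prints: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1
	fmt.Println(out.Get("b3"))
}
```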
-func deferredFromContext(ctx context.Context) bool { - if ctx == nil { - return false - } - if deferred, ok := ctx.Value(deferredKey).(bool); ok { - return deferred - } - return false -} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/doc.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/doc.go deleted file mode 100644 index 8c3c42a9817..00000000000 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package b3 implements the B3 propagator specification as defined at -// https://github.com/openzipkin/b3-propagation -package b3 // import "go.opentelemetry.io/contrib/propagators/b3" diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go deleted file mode 100644 index bff5d96777b..00000000000 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package b3 // import "go.opentelemetry.io/contrib/propagators/b3" - -// Version is the current release version of the B3 propagator. -func Version() string { - return "1.38.0" - // This string is updated by the pre_release.sh script during release -} - -// SemVersion is the semantic version to be supplied to tracer/meter creation. -// -// Deprecated: Use [Version] instead. -func SemVersion() string { - return Version() -} diff --git a/vendor/go.opentelemetry.io/otel/log/global/README.md b/vendor/go.opentelemetry.io/otel/log/global/README.md deleted file mode 100644 index 11e5afefc01..00000000000 --- a/vendor/go.opentelemetry.io/otel/log/global/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Log Global - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/global)](https://pkg.go.dev/go.opentelemetry.io/otel/log/global) diff --git a/vendor/go.opentelemetry.io/otel/log/global/log.go b/vendor/go.opentelemetry.io/otel/log/global/log.go deleted file mode 100644 index bfdb1847908..00000000000 --- a/vendor/go.opentelemetry.io/otel/log/global/log.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package global provides access to a global implementation of the OpenTelemetry -Logs API. - -This package is experimental. It will be deprecated and removed when the [log] -package becomes stable. Its functionality will be migrated to -go.opentelemetry.io/otel. -*/ -package global // import "go.opentelemetry.io/otel/log/global" - -import ( - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/log/internal/global" -) - -// Logger returns a [log.Logger] configured with the provided name and options -// from the globally configured [log.LoggerProvider]. -// -// If this is called before a global LoggerProvider is configured, the returned -// Logger will be a No-Op implementation of a Logger. When a global -// LoggerProvider is registered for the first time, the returned Logger is -// updated in-place to report to this new LoggerProvider. There is no need to -// call this function again for an updated instance. -// -// This is a convenience function. It is equivalent to: -// -// GetLoggerProvider().Logger(name, options...) -func Logger(name string, options ...log.LoggerOption) log.Logger { - return GetLoggerProvider().Logger(name, options...) -} - -// GetLoggerProvider returns the globally configured [log.LoggerProvider]. 
-// -// If a global LoggerProvider has not been configured with [SetLoggerProvider], -// the returned Logger will be a No-Op implementation of a LoggerProvider. When -// a global LoggerProvider is registered for the first time, the returned -// LoggerProvider and all of its created Loggers are updated in-place. There is -// no need to call this function again for an updated instance. -func GetLoggerProvider() log.LoggerProvider { - return global.GetLoggerProvider() -} - -// SetLoggerProvider configures provider as the global [log.LoggerProvider]. -func SetLoggerProvider(provider log.LoggerProvider) { - global.SetLoggerProvider(provider) -} diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go deleted file mode 100644 index e463acbf193..00000000000 --- a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package global is the internal implementation of the OpenTelemetry global -// Logs API. -package global // import "go.opentelemetry.io/otel/log/internal/global" - -import ( - "context" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/log/embedded" -) - -// instLib defines the instrumentation library a logger is created for. -// -// Do not use sdk/instrumentation (API cannot depend on the SDK). -type instLib struct { - name string - version string - schemaURL string - attrs attribute.Set -} - -type loggerProvider struct { - embedded.LoggerProvider - - mu sync.Mutex - loggers map[instLib]*logger - delegate log.LoggerProvider -} - -// Compile-time guarantee loggerProvider implements LoggerProvider. -var _ log.LoggerProvider = (*loggerProvider)(nil) - -func (p *loggerProvider) Logger(name string, options ...log.LoggerOption) log.Logger { - p.mu.Lock() - defer p.mu.Unlock() - - if p.delegate != nil { - return p.delegate.Logger(name, options...) - } - - cfg := log.NewLoggerConfig(options...) - key := instLib{ - name: name, - version: cfg.InstrumentationVersion(), - schemaURL: cfg.SchemaURL(), - attrs: cfg.InstrumentationAttributes(), - } - - if p.loggers == nil { - l := &logger{name: name, options: options} - p.loggers = map[instLib]*logger{key: l} - return l - } - - if l, ok := p.loggers[key]; ok { - return l - } - - l := &logger{name: name, options: options} - p.loggers[key] = l - return l -} - -func (p *loggerProvider) setDelegate(provider log.LoggerProvider) { - p.mu.Lock() - defer p.mu.Unlock() - - p.delegate = provider - for _, l := range p.loggers { - l.setDelegate(provider) - } - p.loggers = nil // Only set logger delegates once. -} - -type logger struct { - embedded.Logger - - name string - options []log.LoggerOption - - delegate atomic.Value // log.Logger -} - -// Compile-time guarantee logger implements Logger. 
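The delegation mechanism in the deleted internal package is easiest to see from the public `log/global` API: loggers handed out before a provider is registered are proxies that get wired up in place afterwards. A sketch, not part of this diff (a noop provider stands in for a real SDK LoggerProvider):

```go
package main

import (
	"go.opentelemetry.io/otel/log/global"
	"go.opentelemetry.io/otel/log/noop"
)

func main() {
	// Obtained before any provider is registered: an inert proxy logger.
	logger := global.Logger("example")
	_ = logger

	// Registering a provider retroactively updates every proxy logger
	// in place; there is no need to re-fetch "logger" afterwards.
	global.SetLoggerProvider(noop.NewLoggerProvider())
}
```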
-var _ log.Logger = (*logger)(nil) - -func (l *logger) Emit(ctx context.Context, r log.Record) { - if del, ok := l.delegate.Load().(log.Logger); ok { - del.Emit(ctx, r) - } -} - -func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool { - var enabled bool - if del, ok := l.delegate.Load().(log.Logger); ok { - enabled = del.Enabled(ctx, param) - } - return enabled -} - -func (l *logger) setDelegate(provider log.LoggerProvider) { - l.delegate.Store(provider.Logger(l.name, l.options...)) -} diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/state.go b/vendor/go.opentelemetry.io/otel/log/internal/global/state.go deleted file mode 100644 index dbe1c2fbfb6..00000000000 --- a/vendor/go.opentelemetry.io/otel/log/internal/global/state.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package global // import "go.opentelemetry.io/otel/log/internal/global" - -import ( - "errors" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/log" -) - -var ( - globalLoggerProvider = defaultLoggerProvider() - - delegateLoggerOnce sync.Once -) - -func defaultLoggerProvider() *atomic.Value { - v := &atomic.Value{} - v.Store(loggerProviderHolder{provider: &loggerProvider{}}) - return v -} - -type loggerProviderHolder struct { - provider log.LoggerProvider -} - -// GetLoggerProvider returns the global LoggerProvider. -func GetLoggerProvider() log.LoggerProvider { - return globalLoggerProvider.Load().(loggerProviderHolder).provider -} - -// SetLoggerProvider sets the global LoggerProvider. -func SetLoggerProvider(provider log.LoggerProvider) { - current := GetLoggerProvider() - if _, cOk := current.(*loggerProvider); cOk { - if _, mpOk := provider.(*loggerProvider); mpOk && current == provider { - err := errors.New("invalid delegation: LoggerProvider self-delegation") - global.Error(err, "No delegate will be configured") - return - } - } - - delegateLoggerOnce.Do(func() { - if def, ok := current.(*loggerProvider); ok { - def.setDelegate(provider) - } - }) - globalLoggerProvider.Store(loggerProviderHolder{provider: provider}) -} diff --git a/vendor/go.opentelemetry.io/otel/log/kind_string.go b/vendor/go.opentelemetry.io/otel/log/kind_string.go index bdfaa18665c..b4f9e533545 100644 --- a/vendor/go.opentelemetry.io/otel/log/kind_string.go +++ b/vendor/go.opentelemetry.io/otel/log/kind_string.go @@ -23,8 +23,9 @@ const _Kind_name = "EmptyBoolFloat64Int64StringBytesSliceMap" var _Kind_index = [...]uint8{0, 5, 9, 16, 21, 27, 32, 37, 40} func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Kind_index)-1 { return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] + return _Kind_name[_Kind_index[idx]:_Kind_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/log/logger.go b/vendor/go.opentelemetry.io/otel/log/logger.go index 8441ca88408..d9decebdef1 100644 --- a/vendor/go.opentelemetry.io/otel/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/log/logger.go @@ -5,6 +5,7 @@ package log // import "go.opentelemetry.io/otel/log" import ( "context" + "slices" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/log/embedded" @@ -114,13 +115,52 @@ func WithInstrumentationVersion(version string) LoggerOption { }) } +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. 
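The merge semantics added in this hunk (union of keys, with the later set winning on duplicates) can be exercised standalone. The sketch below mirrors the `mergeSets` helper from the hunk; it is illustrative, not part of the diff:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// mergeSets returns the union of a and b; b's value wins for duplicate keys.
func mergeSets(a, b attribute.Set) attribute.Set {
	// NewMergeIterator yields the first argument's value for duplicates,
	// so b is passed first to give it precedence.
	iter := attribute.NewMergeIterator(&b, &a)
	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
	for iter.Next() {
		merged = append(merged, iter.Attribute())
	}
	return attribute.NewSet(merged...)
}

func main() {
	a := attribute.NewSet(attribute.String("env", "dev"), attribute.Int("n", 1))
	b := attribute.NewSet(attribute.String("env", "prod"))
	m := mergeSets(a, b)
	// Prints: env=prod,n=1
	fmt.Println(m.Encoded(attribute.DefaultEncoder()))
}
```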
+func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + // WithInstrumentationAttributes returns a [LoggerOption] that sets the // instrumentation attributes of a [Logger]. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling WithInstrumentationAttributeSet with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) LoggerOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet returns a [LoggerOption] that adds the +// instrumentation attributes of a [Logger]. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) LoggerOption { + if set.Len() == 0 { + return loggerOptionFunc(func(config LoggerConfig) LoggerConfig { + return config + }) + } + return loggerOptionFunc(func(config LoggerConfig) LoggerConfig { - config.attrs = attribute.NewSet(attr...) 
+ if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/log/severity_string.go b/vendor/go.opentelemetry.io/otel/log/severity_string.go index 4c20fa5e8aa..fb94caeaba5 100644 --- a/vendor/go.opentelemetry.io/otel/log/severity_string.go +++ b/vendor/go.opentelemetry.io/otel/log/severity_string.go @@ -40,8 +40,9 @@ const _Severity_name = "UNDEFINEDTRACETRACE2TRACE3TRACE4DEBUGDEBUG2DEBUG3DEBUG4I var _Severity_index = [...]uint8{0, 9, 14, 20, 26, 32, 37, 43, 49, 55, 59, 64, 69, 74, 78, 83, 88, 93, 98, 104, 110, 116, 121, 127, 133, 139} func (i Severity) String() string { - if i < 0 || i >= Severity(len(_Severity_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Severity_index)-1 { return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Severity_name[_Severity_index[i]:_Severity_index[i+1]] + return _Severity_name[_Severity_index[idx]:_Severity_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md deleted file mode 100644 index 2de1fc3c6be..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.26.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go deleted file mode 100644 index d8dc822b263..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go +++ /dev/null @@ -1,8996 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -import "go.opentelemetry.io/otel/attribute" - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. 
It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: stable - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming - // to the "aspnetcore.diagnostics.exception.result" semantic conventions. - // It represents the aSP.NET Core exception middleware handling result - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'handled', 'unhandled' - AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") - - // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the - // "aspnetcore.routing.match_status" semantic conventions. 
It represents - // the match result - success or failure - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'success', 'failure' - AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -var ( - // Exception was handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") - // Exception was not handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") - // Exception handling was skipped because the response had started - AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") - // Exception handling didn't run because the request was aborted - AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") -) - -var ( - // Match succeeded - AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") - // Match failed - AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// Generic attributes for AWS services. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. 
It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes for AWS DynamoDB. -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. 
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. 
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// Attributes for AWS Elastic Container Service (ECS). -const ( - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID - // MUST be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is - // populated.) - // Stability: experimental - // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', - // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. 
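Since every DynamoDB helper above is a thin wrapper over an `attribute.Key` method, client instrumentation typically batches request and response attributes into one `SetAttributes` call. A sketch under the same assumed imports as the earlier example (the function name is hypothetical; the literals come from the Examples in the docs above):

// imports: trace "go.opentelemetry.io/otel/trace"; semconv as above (assumed path)
func annotateDynamoDBQuery(span trace.Span, count, scanned int) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),   // request: table(s) involved
		semconv.AWSDynamoDBLimit(10),             // request: Limit parameter
		semconv.AWSDynamoDBConsistentRead(true),  // request: ConsistentRead flag
		semconv.AWSDynamoDBCount(count),          // response: Count
		semconv.AWSDynamoDBScannedCount(scanned), // response: ScannedCount
	)
}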
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a - // running [ECS - // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', - // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family - // name of the [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - // used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSTaskID returns an attribute KeyValue conforming to the -// "aws.ecs.task.id" semantic conventions. It represents the ID of a running -// ECS task. The ID MUST be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS -// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) -// used to create the ECS task. 
-func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Attributes for AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:eks:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Attributes for AWS Logs. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each writes to its own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to.
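The ECS and EKS attributes above are resource-level rather than span-level, so they usually land in the SDK resource at startup. A hedged sketch using the OTel Go SDK resource API; in practice the values come from the ECS task metadata endpoint, and the function name and literals here are illustrative:

// imports: "go.opentelemetry.io/otel/sdk/resource"; semconv as above (assumed path)
func ecsResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // schema URL exported by the semconv package
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSECS,
		semconv.AWSECSLaunchtypeFargate,
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
	)
}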
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Attributes for AWS Lambda. -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` endpoint, where applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` endpoint, where applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for AWS S3. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation.
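The log group/stream helpers and the Lambda invoked-ARN helper compose the same way on a handler span. A sketch under the same assumed imports; the function name is hypothetical and the log group name is the example value from the docs above:

func annotateLambdaSpan(span trace.Span, invokedARN string) {
	span.SetAttributes(
		semconv.AWSLambdaInvokedARN(invokedARN), // alias-qualified ARN from the invocation context
		semconv.AWSLogGroupNames("/aws/lambda/my-function"),
	)
}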
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// The web browser attributes -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
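For a multipart upload, the S3 helpers above combine naturally on the span for each upload-part call. A sketch with the same assumed imports; function name hypothetical, literals from the Examples above:

func annotateUploadPart(span trace.Span, uploadID string, partNumber int) {
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID(uploadID),     // ties the part to its multipart upload
		semconv.AWSS3PartNumber(partNumber), // positive integer between 1 and 10,000
	)
}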
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
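On the server side, `client.address` and `client.port` record the originating peer (behind any intermediaries, where known). A sketch with the same assumed imports; the function name is hypothetical:

func annotateServerSpan(span trace.Span, addr string, port int) {
	span.SetAttributes(
		semconv.ClientAddress(addr), // e.g. "10.1.2.80" or a domain name
		semconv.ClientPort(port),    // e.g. 65123
	)
}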
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Apps - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - 
// IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Attributes for CloudEvents. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. 
It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. 
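A consumer processing a CloudEvent would typically stamp the event coordinates on its processing span using the helpers above. A sketch, same assumed imports, hypothetical function name:

func annotateCloudEvent(span trace.Span, id, source, specVersion, eventType string) {
	span.SetAttributes(
		semconv.CloudeventsEventID(id),
		semconv.CloudeventsEventSource(source),
		semconv.CloudeventsEventSpecVersion(specVersion),
		semconv.CloudeventsEventType(eventType),
	)
}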
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). 
-func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCPUStateKey is the attribute Key conforming to the - // "container.cpu.state" semantic conventions. It represents the CPU state - // for this data point. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'user', 'kernel' - ContainerCPUStateKey = attribute.Key("container.cpu.state") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated.
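As an illustrative sketch (an assumption on my part, not part of the generated conventions), the code.* constructors defined above can annotate a span with its originating source location; `span` is a hypothetical trace.Span and the values reuse the documented examples:

    span.SetAttributes(
        semconv.CodeNamespace("com.example.MyHTTPService"),
        semconv.CodeFunction("serveRequest"),
        semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
        semconv.CodeLineNumber(42),
    )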
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `<tag>` section of the full name for example from - // `registry.example.com/my-org/my-image:<tag>`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -var ( - // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) - ContainerCPUStateUser = ContainerCPUStateKey.String("user") - // When CPU is used by the system (host OS) - ContainerCPUStateSystem = ContainerCPUStateKey.String("system") - // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) - ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions.
It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `<tag>` section of the full name for example from -// `registry.example.com/my-org/my-image:<tag>`. -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// This group defines the attributes used to describe telemetry in the context -// of databases. -const ( - // DBClientConnectionsPoolNameKey is the attribute Key conforming to the - // "db.client.connections.pool.name" semantic conventions. It represents - // the name of the connection pool; unique within the instrumented - // application. In case the connection pool implementation doesn't provide - // a name, instrumentation should use a combination of `server.address` and - // `server.port` attributes formatted as `server.address:server.port`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myDataSource' - DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") - - // DBClientConnectionsStateKey is the attribute Key conforming to the - // "db.client.connections.state" semantic conventions. It represents the - // state of a connection in the pool. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle' - DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: If the collection name is parsed from the query, it SHOULD match - // the value provided in the query and may be qualified with the schema and - // database name. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" - // semantic conventions. It represents the name of the database, fully - // qualified within the server address and port. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'test.users' - // Note: If a database system has multiple namespace components, they - // SHOULD be concatenated (potentially using database system specific - // conventions) from most general to most specific namespace component, and - // more specific namespaces SHOULD NOT be captured without the more general - // namespaces, to ensure that "startswith" queries for the more general - // namespaces will be valid.
- // Semantic conventions for individual database systems SHOULD document - // what `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationNameKey is the attribute Key conforming to the - // "db.operation.name" semantic conventions. It represents the name of the - // operation or command being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: It is RECOMMENDED to capture the value as provided by the - // application without attempting to do any case normalization. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey - // "WuValue"' - DBQueryTextKey = attribute.Key("db.query.text") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents the database management system (DBMS) product - // as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual DBMS may differ from the one identified by the client. - // For example, when using PostgreSQL client libraries to connect to a - // CockroachDB, the `db.system` is set to `postgresql` based on the - // instrumentation's best knowledge. - DBSystemKey = attribute.Key("db.system") -) - -var ( - // idle - DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") - // used - DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to -// the "db.client.connections.pool.name" semantic conventions. It represents -// the name of the connection pool; unique within the instrumented application. -// In case the connection pool implementation doesn't provide a name, -// instrumentation should use a combination of `server.address` and -// `server.port` attributes formatted as `server.address:server.port`. -func DBClientConnectionsPoolName(val string) attribute.KeyValue { - return DBClientConnectionsPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the -// "db.namespace" semantic conventions. It represents the name of the database, -// fully qualified within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the -// "db.query.text" semantic conventions. It represents the database query being -// executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// This group defines attributes for Cassandra. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents whether - // or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e.
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively.
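A possible usage sketch for the Cassandra attributes above (illustrative, not taken from the generated file; `span` is a hypothetical trace.Span and the values reuse the documented examples):

    span.SetAttributes(
        semconv.DBSystemCassandra,
        semconv.DBCassandraConsistencyLevelQuorum,
        semconv.DBCassandraCoordinatorDC("us-west-2"),
        semconv.DBCassandraIdempotence(true),
        semconv.DBCassandraPageSize(5000),
    )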
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// This group defines attributes for Azure Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // Cosmos DB operation type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the RU - // consumed for that operation. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // Cosmos DB sub status code.
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the RU -// consumed for that operation. -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes. -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// This group defines attributes for Elasticsearch. -const ( - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions.
It represents -// the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") -) - -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// Attributes for software deployments. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// Attributes that represent an occurrence of a lifecycle transition on the -// Android platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It is deprecated; use the - // `device.app.lifecycle` event definition including `android.state` as a - // payload field instead. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // from which the `OS identifiers` are derived.
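As a hedged sketch of how the core db.* attributes in this group might be combined on a client span (assumptions: the package is imported as `semconv` and `span` is a trace.Span; the values come from the documented examples):

    span.SetAttributes(
        semconv.DBSystemPostgreSQL,
        semconv.DBNamespace("customers"),
        semconv.DBOperationName("SELECT"),
        semconv.DBCollectionName("public.users"),
        semconv.DBQueryText("SELECT * FROM wuser_table where username = ?"),
    )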
- AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// These attributes may be used for any disk related operation. 
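The device.* constructors just above are resource-level rather than span-level; a minimal sketch, assuming go.opentelemetry.io/otel/sdk/resource is imported as `resource`, `ctx` is a context.Context, and error handling is elided:

    res, _ := resource.New(ctx,
        resource.WithAttributes(
            semconv.DeviceManufacturer("Samsung"),
            semconv.DeviceModelIdentifier("SM-G920F"),
            semconv.DeviceModelName("Samsung Galaxy S6"),
        ),
    )
    _ = res // the resource would normally be passed to a TracerProvider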
-const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report a DNS query. -const ( - // DNSQuestionNameKey is the attribute Key conforming to the - // "dns.question.name" semantic conventions. It represents the name being - // queried. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.example.com', 'opentelemetry.io' - // Note: If the name field contains non-printable characters (below 32 or - // above 126), those characters should be represented as escaped base 10 - // integers (\DDD). Back slashes and quotes should be escaped. Tabs, - // carriage returns, and line feeds should be converted to \t, \r, and \n - // respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Attributes for operations with an authenticated and/or authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. 
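A usage sketch for the enduser.* attributes (keys above, constructors declared just below); illustrative only, with `span` a hypothetical trace.Span and values reused from the documented examples:

    span.SetAttributes(
        semconv.EnduserID("username"),
        semconv.EnduserRole("admin"),
        semconv.EnduserScope("read:message write:files"),
    )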
-func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// The shared attributes used to report an error. -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It describes a class of error the - // operation ended with. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable, and SHOULD have low - // cardinality. - // - // When `error.type` is set to a type (e.g., an exception type), its - // canonical class name identifying the type within the artifact SHOULD be - // used. - // - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: - // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes.
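One hedged way to apply `error.type` (a sketch, not something this file prescribes): set a low-cardinality identifier when the operation fails, falling back to the `_OTHER` value; `span` and `err` are hypothetical:

    if err != nil {
        span.SetAttributes(semconv.ErrorTypeKey.String("timeout"))
        // or, when no more specific identifier is known:
        span.SetAttributes(semconv.ErrorTypeOther)
    }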
- EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It SHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It SHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// FaaS attributes -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 this corresponds to the bucket name, and - // in Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 this is the name of the file, and in Cosmos DB the table name.
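A sketch of recording an escaping exception with the attributes above (assumptions: `span` and `err` exist, `trace` is go.opentelemetry.io/otel/trace, and the SDK's RecordError fills `exception.type` and `exception.message` itself, which is a property of the SDK rather than of this file):

    span.RecordError(err, trace.WithAttributes(
        semconv.ExceptionEscaped(true),
    ))
    span.End()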
-
-// FaaS attributes
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It describes the type
- // of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable.
Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. 
It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
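For illustration, a sketch of annotating an invocation span with the FaaS helpers above. The tracer, span name, and inputs are placeholders; the memory value mirrors the documented example, and the semconv import version is an assumption.

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func startInvocationSpan(ctx context.Context, tracer trace.Tracer, invocationID string, coldstart bool) (context.Context, trace.Span) {
	return tracer.Start(ctx, "my-function",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.FaaSTriggerHTTP, // enum members are ready-made KeyValues
			semconv.FaaSInvocationID(invocationID),
			semconv.FaaSColdstart(coldstart),
			semconv.FaaSMaxMemory(134217728), // bytes, per the example above
		))
}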
-
-// Attributes for Feature Flags.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It SHOULD be a semantic
- // identifier for a value. If one is unavailable, a stringified version of
- // the value can be used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
-// identifier for a value. If one is unavailable, a stringified version of the
-// value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// Describes file attributes.
-const (
- // FileDirectoryKey is the attribute Key conforming to the "file.directory"
- // semantic conventions. It represents the directory where the file is
- // located. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/user', 'C:\\Program Files\\MyApp'
- FileDirectoryKey = attribute.Key("file.directory")
-
- // FileExtensionKey is the attribute Key conforming to the "file.extension"
- // semantic conventions. It represents the file extension, excluding the
- // leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: When the file name has multiple extensions (example.tar.gz), only
- // the last one should be captured ("gz", not "tar.gz").
- FileExtensionKey = attribute.Key("file.extension")
-
- // FileNameKey is the attribute Key conforming to the "file.name" semantic
- // conventions. It represents the name of the file including the extension,
- // without the directory.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.png'
- FileNameKey = attribute.Key("file.name")
-
- // FilePathKey is the attribute Key conforming to the "file.path" semantic
- // conventions. It represents the full path to the file, including the file
- // name. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/alice/example.png', 'C:\\Program
- // Files\\MyApp\\myapp.exe'
- FilePathKey = attribute.Key("file.path")
-
- // FileSizeKey is the attribute Key conforming to the "file.size" semantic
- // conventions. It represents the file size in bytes.
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - FileSizeKey = attribute.Key("file.size") -) - -// FileDirectory returns an attribute KeyValue conforming to the -// "file.directory" semantic conventions. It represents the directory where the -// file is located. It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the -// "file.extension" semantic conventions. It represents the file extension, -// excluding the leading dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" -// semantic conventions. It represents the name of the file including the -// extension, without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" -// semantic conventions. It represents the full path to the file, including the -// file name. It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" -// semantic conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// Attributes for Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Attributes for Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// The attributes used to describe telemetry in the context of LLM (Large -// Language Models) requests and responses. -const ( - // GenAiCompletionKey is the attribute Key conforming to the - // "gen_ai.completion" semantic conventions. It represents the full - // response received from the LLM. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'assistant', 'content': 'The capital of France is - // Paris.'}]" - // Note: It's RECOMMENDED to format completions as JSON string matching - // [OpenAI messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiCompletionKey = attribute.Key("gen_ai.completion") - - // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" - // semantic conventions. It represents the full prompt sent to an LLM. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'user', 'content': 'What is the capital of - // France?'}]" - // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI - // messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiPromptKey = attribute.Key("gen_ai.prompt") - - // GenAiRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the - // maximum number of tokens the LLM generates for a request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAiRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of - // the LLM a request is being made to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4' - GenAiRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAiRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0.0 - GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAiRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p - // sampling setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0 - GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAiResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to - // each generation received. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'stop' - GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAiResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'chatcmpl-123' - GenAiResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAiResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of - // the LLM a response was generated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4-0613' - GenAiResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as - // identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'openai' - // Note: The actual GenAI product may differ from the one identified by the - // client. 
For example, when using OpenAI client libraries to communicate - // with Mistral, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge. - GenAiSystemKey = attribute.Key("gen_ai.system") - - // GenAiUsageCompletionTokensKey is the attribute Key conforming to the - // "gen_ai.usage.completion_tokens" semantic conventions. It represents the - // number of tokens used in the LLM response (completion). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 180 - GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") - - // GenAiUsagePromptTokensKey is the attribute Key conforming to the - // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the - // number of tokens used in the LLM prompt. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") -) - -var ( - // OpenAI - GenAiSystemOpenai = GenAiSystemKey.String("openai") -) - -// GenAiCompletion returns an attribute KeyValue conforming to the -// "gen_ai.completion" semantic conventions. It represents the full response -// received from the LLM. -func GenAiCompletion(val string) attribute.KeyValue { - return GenAiCompletionKey.String(val) -} - -// GenAiPrompt returns an attribute KeyValue conforming to the -// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to -// an LLM. -func GenAiPrompt(val string) attribute.KeyValue { - return GenAiPromptKey.String(val) -} - -// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the LLM generates for a request. -func GenAiRequestMaxTokens(val int) attribute.KeyValue { - return GenAiRequestMaxTokensKey.Int(val) -} - -// GenAiRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// LLM a request is being made to. -func GenAiRequestModel(val string) attribute.KeyValue { - return GenAiRequestModelKey.String(val) -} - -// GenAiRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the LLM request. -func GenAiRequestTemperature(val float64) attribute.KeyValue { - return GenAiRequestTemperatureKey.Float64(val) -} - -// GenAiRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p -// sampling setting for the LLM request. -func GenAiRequestTopP(val float64) attribute.KeyValue { - return GenAiRequestTopPKey.Float64(val) -} - -// GenAiResponseFinishReasons returns an attribute KeyValue conforming to -// the "gen_ai.response.finish_reasons" semantic conventions. It represents the -// array of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAiResponseFinishReasonsKey.StringSlice(val) -} - -// GenAiResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique -// identifier for the completion. 
-func GenAiResponseID(val string) attribute.KeyValue {
- return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
- return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
- return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
- return GenAiUsagePromptTokensKey.Int(val)
-}
-
-// Attributes for GraphQL.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
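A sketch of how the gen_ai.* helpers defined above might wrap an LLM call. The span name, attribute values, and the commented-out client call are placeholders taken from the documented examples; the semconv import version is an assumption.

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func chat(ctx context.Context, tracer trace.Tracer) {
	ctx, span := tracer.Start(ctx, "chat gpt-4", trace.WithAttributes(
		semconv.GenAiSystemOpenai,
		semconv.GenAiRequestModel("gpt-4"),
		semconv.GenAiRequestMaxTokens(100),
		semconv.GenAiRequestTemperature(0.0),
		semconv.GenAiRequestTopP(1.0),
	))
	defer span.End()

	// resp := callModel(ctx) // placeholder for the actual client call using ctx

	// After the response arrives, record response and usage attributes.
	span.SetAttributes(
		semconv.GenAiResponseID("chatcmpl-123"),
		semconv.GenAiResponseModel("gpt-4-0613"),
		semconv.GenAiResponseFinishReasons("stop"),
		semconv.GenAiUsagePromptTokens(100),
		semconv.GenAiUsageCompletionTokens(180),
	)
}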
-
-// Attributes for the Heroku platform.
-const (
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application.
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release.
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created.
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
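A sketch of wiring these helpers into an SDK resource. The Heroku dyno-metadata environment variable names are assumptions, not part of this package, as is the semconv import version.

import (
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
)

func herokuResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // schema URL exported by the semconv package
		semconv.HerokuAppID(os.Getenv("HEROKU_APP_ID")),                                // assumed env var
		semconv.HerokuReleaseCommit(os.Getenv("HEROKU_SLUG_COMMIT")),                   // assumed env var
		semconv.HerokuReleaseCreationTimestamp(os.Getenv("HEROKU_RELEASE_CREATED_AT")), // assumed env var
	)
}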
-
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches or disk arrays.
-const (
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- HostArchKey = attribute.Key("host.arch")
-
- // HostCPUCacheL2SizeKey is the attribute Key conforming to the
- // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
- // of level 2 memory cache available to the processor (in Bytes).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12288000
- HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
- // HostCPUFamilyKey is the attribute Key conforming to the
- // "host.cpu.family" semantic conventions. It represents the family or
- // generation of the CPU.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', 'PA-RISC 1.1e'
- HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
- // HostCPUModelIDKey is the attribute Key conforming to the
- // "host.cpu.model.id" semantic conventions. It represents the model
- // identifier. It provides more granular information about the CPU,
- // distinguishing it from other CPUs within the same family.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1', 'r1p1'
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. 
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
- return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the VM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
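Similarly, a hedged sketch of a host resource built from these helpers, using the documented example values; the semconv import version is an assumption.

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
)

func hostResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"),
		semconv.HostName("opentelemetry-test"),
		semconv.HostType("n1-standard-1"),
		semconv.HostArchAMD64, // enum member, already an attribute.KeyValue
		semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"), // variadic slice helper
	)
}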
-
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPConnectionStateKey is the attribute Key conforming to the
- // "http.connection.state" semantic conventions. It represents the state of
- // the HTTP connection in the HTTP connection pool.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'active', 'idle'
- HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
- // "http.request.method" semantic conventions. It represents the HTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
- // (this list MUST be a full override of the default known method, it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case insensitive, SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so, MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g.
redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPRequestSizeKey is the attribute Key conforming to the - // "http.request.size" semantic conventions. It represents the total size - // of the request in bytes. This should be the total number of bytes sent - // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 - // and HTTP/3), headers, and request body if any. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1437 - HTTPRequestSizeKey = attribute.Key("http.request.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseSizeKey is the attribute Key conforming to the - // "http.response.size" semantic conventions. It represents the total size - // of the response in bytes. This should be the total number of bytes sent - // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and - // HTTP/3), headers, and response body and trailers if any. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1437 - HTTPResponseSizeKey = attribute.Key("http.response.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route, that is, the path - // template in the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. - // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
- HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // active state - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of -// the request in bytes. This should be the total number of bytes sent over the -// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. 
It represents the total size of -// the response in bytes. This should be the total number of bytes sent over -// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Java Virtual machine related attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") - - // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" - // semantic conventions. It represents the name of the garbage collector - // action. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'end of minor GC', 'end of major GC' - // Note: Garbage collector action is generally obtained via - // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). - JvmGcActionKey = attribute.Key("jvm.gc.action") - - // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" - // semantic conventions. It represents the name of the garbage collector. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Young Generation', 'G1 Old Generation' - // Note: Garbage collector name is generally obtained via - // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). - JvmGcNameKey = attribute.Key("jvm.gc.name") - - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") - - // JvmThreadDaemonKey is the attribute Key conforming to the - // "jvm.thread.daemon" semantic conventions. It represents whether the - // thread is a daemon or not. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") - - // JvmThreadStateKey is the attribute Key conforming to the - // "jvm.thread.state" semantic conventions. It represents the state of the - // thread. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'runnable', 'blocked' - JvmThreadStateKey = attribute.Key("jvm.thread.state") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -var ( - // A thread that has not yet started is in this state - JvmThreadStateNew = JvmThreadStateKey.String("new") - // A thread executing in the Java virtual machine is in this state - JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") - // A thread that is blocked waiting for a monitor lock is in this state - JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") - // A thread that is waiting indefinitely for another thread to perform a particular action is in this state - JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") - // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state - JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") - // A thread that has exited is in this state - JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// JvmGcAction returns an attribute KeyValue conforming to the -// "jvm.gc.action" semantic conventions. It represents the name of the garbage -// collector action. -func JvmGcAction(val string) attribute.KeyValue { - return JvmGcActionKey.String(val) -} - -// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" -// semantic conventions. It represents the name of the garbage collector. -func JvmGcName(val string) attribute.KeyValue { - return JvmGcNameKey.String(val) -} - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// JvmThreadDaemon returns an attribute KeyValue conforming to the -// "jvm.thread.daemon" semantic conventions. It represents whether the -// thread is a daemon or not. -func JvmThreadDaemon(val bool) attribute.KeyValue { - return JvmThreadDaemonKey.Bool(val) -} - -// Kubernetes resource attributes. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster.
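These JVM attributes are designed for runtime metrics such as jvm.memory.used. A sketch of one measurement using the enum members and helpers above (instrument handling is simplified; the pool name is taken from the documented examples):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path for this package
)

func recordHeapUsed(ctx context.Context, usedBytes int64) error {
	gauge, err := otel.Meter("example").Int64UpDownCounter("jvm.memory.used",
		metric.WithUnit("By"))
	if err != nil {
		return err
	}
	// Dimension the measurement by memory type (stable enum) and pool name.
	gauge.Add(ctx, usedBytes, metric.WithAttributes(
		semconv.JvmMemoryTypeHeap,
		semconv.JvmMemoryPoolName("G1 Eden space"),
	))
	return nil
}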
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key - // conforming to the "k8s.container.status.last_terminated_reason" semantic - // conventions. It represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Evicted', 'Error' - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. 
-func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. 
-func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Attributes for a file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. 
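The K8s attributes describe the process's environment and therefore normally belong on the SDK Resource rather than on individual spans. A sketch (values are the documented examples; in practice they come from the downward API or an environment detector):

package main

import (
	"context"

	"go.opentelemetry.io/otel/sdk/resource"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path for this package
)

func k8sResource(ctx context.Context) (*resource.Resource, error) {
	return resource.New(ctx, resource.WithAttributes(
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
	))
}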
-func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// The generic attributes that may be used in any Log Record. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. 
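A sketch bundling the log-file and log-record attributes above for a record tailed from a file (values are the documented examples):

package main

import (
	"go.opentelemetry.io/otel/attribute"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path for this package
)

func fileLogAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.LogFileName("audit.log"),
		semconv.LogFilePath("/var/log/mysql/audit.log"),
		semconv.LogIostreamStdout, // enum member of log.iostream
		semconv.LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
	}
}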
It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to - // the "messaging.destination.partition.id" semantic conventions. It - // represents the identifier of the partition messages are sent to or - // received from, unique within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1' - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. 
It - // represents the name of the original destination the message was - // published to - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to both the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to both the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack', 'nack', 'send' - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents the messaging - // system as identified by the client instrumentation. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate - // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on - // the instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are delivered to or processed by a consumer - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") - // One or more messages are settled - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - MessagingSystemServicebus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). 
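Putting the general messaging attributes together, a producer-side sketch for a Kafka publish (tracer name and destination are illustrative):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path for this package
)

func publishSpan(ctx context.Context) (context.Context, trace.Span) {
	return otel.Tracer("example").Start(ctx, "publish MyTopic",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,          // enum member
			semconv.MessagingOperationTypePublish, // enum member
			semconv.MessagingOperationName("send"),
			semconv.MessagingDestinationName("MyTopic"),
			semconv.MessagingBatchMessageCount(2), // batch APIs only, per the note above
		))
}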
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming -// to the "messaging.destination.partition.id" semantic conventions. It -// represents the identifier of the partition messages are sent to or received -// from, unique within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. 
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// This group describes attributes specific to Apache Kafka. -const ( - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. 
They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// This group describes attributes specific to RabbitMQ. -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming - // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. - // It represents the rabbitMQ message delivery tag - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 123 - MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic -// conventions. It represents the rabbitMQ message delivery tag -func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitmqMessageDeliveryTagKey.Int(val) -} - -// This group describes attributes specific to RocketMQ. -const ( - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. 
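A consumer-side sketch combining the Kafka-specific keys with the general partition id; the guard implements the rule above that a nil key MUST NOT be recorded (group and partition values are illustrative):

package main

import (
	"go.opentelemetry.io/otel/attribute"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path for this package
)

func kafkaConsumeAttrs(key []byte, offset int64) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemKafka,
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingDestinationPartitionID("1"),
		semconv.MessagingKafkaMessageOffset(int(offset)),
	}
	if key != nil {
		attrs = append(attrs, semconv.MessagingKafkaMessageKey(string(key)))
	}
	return attrs
}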
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for a delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to the consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the message group, which is essential for FIFO messages. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of the message, another way to mark the message besides the message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of the message besides the topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for a delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to the consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the message group, which is essential for FIFO messages. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of the message, another way to mark the message besides the message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of the message besides the topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions.
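A RocketMQ sketch for a FIFO message, combining the enum members and helpers above (values are the documented examples):

package main

import (
	"go.opentelemetry.io/otel/attribute"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path for this package
)

func rocketmqFifoAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingSystemRocketmq,
		semconv.MessagingRocketmqNamespace("myNamespace"),
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqMessageTypeFifo,                // enum member
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"), // required for FIFO ordering
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
	}
}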
It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// This group describes attributes specific to GCP Pub/Sub. -const ( - // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. - // It represents the ack deadline in seconds set for the modify ack - // deadline request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It - // represents the ack id for a given message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack_id' - MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key - // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" - // semantic conventions. It represents the delivery attempt for a given - // message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") -) - -// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic -// conventions. It represents the ack deadline in seconds set for the modify -// ack deadline request. -func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It -// represents the ack id for a given message. -func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageAckIDKey.String(val) -} - -// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. 
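The GCP Pub/Sub equivalents, e.g. around a modify-ack-deadline call (values are the documented examples):

package main

import (
	"go.opentelemetry.io/otel/attribute"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path for this package
)

func pubsubAckAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingSystemGCPPubsub,
		semconv.MessagingGCPPubsubMessageAckID("ack_id"),
		semconv.MessagingGCPPubsubMessageAckDeadline(10), // seconds
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(2),
		// Omit the ordering key entirely when the message has none.
		semconv.MessagingGCPPubsubMessageOrderingKey("ordering_key"),
	}
}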
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
-	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
-
-// This group describes attributes specific to Azure Service Bus.
-const (
-	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
-	// conforming to the "messaging.servicebus.destination.subscription_name"
-	// semantic conventions. It represents the name of the subscription in the
-	// topic that messages are received from.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'mySubscription'
-	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
-
-	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
-	// to the "messaging.servicebus.disposition_status" semantic conventions.
-	// It describes the [settlement
-	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
-
-	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
-	// conforming to the "messaging.servicebus.message.delivery_count" semantic
-	// conventions. It represents the number of deliveries that have been
-	// attempted for this message.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 2
-	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
-
-	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
-	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
-	// conventions. It represents the UTC epoch seconds at which the message
-	// has been accepted and stored in the entity.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1701393730
-	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
-)
-
-var (
-	// Message is completed
-	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
-	// Message is abandoned
-	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
-	// Message is sent to dead letter queue
-	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
-	// Message is deferred
-	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
-)
-
-// MessagingServicebusDestinationSubscriptionName returns an attribute
-// KeyValue conforming to the
-// "messaging.servicebus.destination.subscription_name" semantic conventions.
-// It represents the name of the subscription in the topic that messages are
-// received from.
-func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
-	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
-}
-
-// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.delivery_count" semantic
-// conventions. It represents the number of deliveries that have been attempted
-// for this message.
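// A minimal usage sketch for the Service Bus attributes above (semconv and
// span are illustrative names, as before). Note that the disposition status
// values are already complete attribute.KeyValue pairs:
//
//	span.SetAttributes(
//		semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
//		semconv.MessagingServicebusDispositionStatusComplete,
//		semconv.MessagingServicebusMessageDeliveryCount(2),
//	)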
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServicebusMessageDeliveryCountKey.Int(val) -} - -// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServicebusMessageEnqueuedTimeKey.Int(val) -} - -// This group describes attributes specific to Azure Event Hubs. -const ( - // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to - // the "messaging.eventhubs.consumer.group" semantic conventions. It - // represents the name of the consumer group the event consumer is - // associated with. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'indexer' - MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") - - // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming - // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. - // It represents the UTC epoch seconds at which the message has been - // accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") -) - -// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming -// to the "messaging.eventhubs.consumer.group" semantic conventions. It -// represents the name of the consumer group the event consumer is associated -// with. -func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { - return MessagingEventhubsConsumerGroupKey.String(val) -} - -// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.eventhubs.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. 
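// A minimal usage sketch for the Event Hubs attributes above (illustrative
// names, as before):
//
//	span.SetAttributes(
//		semconv.MessagingEventhubsConsumerGroup("indexer"),
//		semconv.MessagingEventhubsMessageEnqueuedTime(1701393730),
//	)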
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '001'
-	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
-	// NetworkCarrierNameKey is the attribute Key conforming to the
-	// "network.carrier.name" semantic conventions. It represents the name of
-	// the mobile carrier.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'sprint'
-	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
-	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
-	// "network.connection.subtype" semantic conventions. It describes more
-	// details regarding the connection.type. It may be the type of cell
-	// technology connection, but it could be used for describing details
-	// about a wifi connection.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'LTE'
-	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
-	// NetworkConnectionTypeKey is the attribute Key conforming to the
-	// "network.connection.type" semantic conventions. It represents the
-	// internet connection type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'wifi'
-	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
-	// NetworkIoDirectionKey is the attribute Key conforming to the
-	// "network.io.direction" semantic conventions. It represents the network
-	// IO operation direction.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'transmit'
-	NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
-	// NetworkLocalAddressKey is the attribute Key conforming to the
-	// "network.local.address" semantic conventions. It represents the local
-	// address of the network connection - IP address or Unix domain socket
-	// name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '10.1.2.80', '/tmp/my.sock'
-	NetworkLocalAddressKey = attribute.Key("network.local.address")
-
-	// NetworkLocalPortKey is the attribute Key conforming to the
-	// "network.local.port" semantic conventions. It represents the local port
-	// number of the network connection.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 65123
-	NetworkLocalPortKey = attribute.Key("network.local.port")
-
-	// NetworkPeerAddressKey is the attribute Key conforming to the
-	// "network.peer.address" semantic conventions. It represents the peer
-	// address of the network connection - IP address or Unix domain socket
-	// name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '10.1.2.80', '/tmp/my.sock'
-	NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
-	// NetworkPeerPortKey is the attribute Key conforming to the
-	// "network.peer.port" semantic conventions. It represents the peer port
-	// number of the network connection.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 65123
-	NetworkPeerPortKey = attribute.Key("network.peer.port")
-
-	// NetworkProtocolNameKey is the attribute Key conforming to the
-	// "network.protocol.name" semantic conventions. It represents the [OSI
-	// application layer](https://osi-model.com/application-layer/) or non-OSI
-	// equivalent.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // actual version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.1', '2' - // Note: If protocol version is subject to negotiation (for example using - // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute - // SHOULD be set to the negotiated version. If the actual protocol version - // is not known, this attribute SHOULD NOT be set. - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
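// A minimal usage sketch for the network attributes above (illustrative
// names, as before):
//
//	span.SetAttributes(
//		semconv.NetworkConnectionTypeCell,
//		semconv.NetworkConnectionSubtypeLte,
//		semconv.NetworkCarrierName("sprint"),
//		semconv.NetworkCarrierMcc("310"),
//	)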
-func NetworkLocalAddress(val string) attribute.KeyValue {
-	return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
-	return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
-	return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
-	return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
-	return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the actual
-// version of the protocol used for network communication.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
-	return NetworkProtocolVersionKey.String(val)
-}
-
-// An OCI image manifest.
-const (
-	// OciManifestDigestKey is the attribute Key conforming to the
-	// "oci.manifest.digest" semantic conventions. It represents the digest of
-	// the OCI image manifest. For container images specifically, this is the
-	// digest by which the container image is known.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
-	// Note: Follows [OCI Image Manifest
-	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
-	// and specifically the [Digest
-	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
-	// An example can be found in [Example Image
-	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
-	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically, this is the digest by
-// which the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
-	return OciManifestDigestKey.String(val)
-}
-
-// Attributes used by the OpenTracing Shim layer.
-const (
-	// OpentracingRefTypeKey is the attribute Key conforming to the
-	// "opentracing.ref_type" semantic conventions. It represents the
-	// parent-child Reference type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: The causal relationship between a child Span and a parent Span.
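// A minimal usage sketch for the OCI manifest attribute above, using the
// documented example digest (illustrative names, as before):
//
//	span.SetAttributes(
//		semconv.OciManifestDigest("sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"),
//	)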
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. 
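// A minimal usage sketch for the OS resource attributes above, assuming the
// SDK package go.opentelemetry.io/otel/sdk/resource is imported as resource
// (an illustrative assumption, like the earlier names):
//
//	res := resource.NewSchemaless(
//		semconv.OSTypeLinux,
//		semconv.OSName("Ubuntu"),
//		semconv.OSVersion("18.04.1"),
//	)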
It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Attributes reserved for OpenTelemetry -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
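// A minimal usage sketch for the status and scope attributes above
// (illustrative names, as before):
//
//	attrs := []attribute.KeyValue{
//		semconv.OTelScopeName("io.opentelemetry.contrib.mongodb"),
//		semconv.OTelScopeVersion("1.0.0"),
//		semconv.OTelStatusCodeOk,
//	}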
It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
-	return OTelScopeVersionKey.String(val)
-}
-
-// Operations that access some remote service.
-const (
-	// PeerServiceKey is the attribute Key conforming to the "peer.service"
-	// semantic conventions. It represents the
-	// [`service.name`](/docs/resource/README.md#service) of the remote
-	// service. SHOULD be equal to the actual `service.name` resource attribute
-	// of the remote service if any.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'AuthTokenCache'
-	PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
-	return PeerServiceKey.String(val)
-}
-
-// An operating system process.
-const (
-	// ProcessCommandKey is the attribute Key conforming to the
-	// "process.command" semantic conventions. It represents the command used
-	// to launch the process (i.e. the command name). On Linux based systems,
-	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
-	// be set to the first parameter extracted from `GetCommandLineW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otelcol'
-	ProcessCommandKey = attribute.Key("process.command")
-
-	// ProcessCommandArgsKey is the attribute Key conforming to the
-	// "process.command_args" semantic conventions. It represents all the
-	// command arguments (including the command/executable itself) as received
-	// by the process. On Linux-based systems (and some other Unixoid systems
-	// supporting procfs), can be set according to the list of null-delimited
-	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-	// this would be the full argv vector passed to `main`.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otelcol', '--config=config.yaml'
-	ProcessCommandArgsKey = attribute.Key("process.command_args")
-
-	// ProcessCommandLineKey is the attribute Key conforming to the
-	// "process.command_line" semantic conventions. It represents the full
-	// command used to launch the process as a single string representing the
-	// full command. On Windows, can be set to the result of `GetCommandLineW`.
-	// Do not set this if you have to assemble it just for monitoring; use
-	// `process.command_args` instead.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
-	ProcessCommandLineKey = attribute.Key("process.command_line")
-
-	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
-	// "process.context_switch_type" semantic conventions. It specifies whether
-	// the context switches for this data point were voluntary or involuntary.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
-
-	// ProcessCreationTimeKey is the attribute Key conforming to the
-	// "process.creation.time" semantic conventions. It represents the date and
-	// time the process was created, in ISO 8601 format.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2023-11-21T09:25:34.853Z'
-	ProcessCreationTimeKey = attribute.Key("process.creation.time")
-
-	// ProcessExecutableNameKey is the attribute Key conforming to the
-	// "process.executable.name" semantic conventions. It represents the name
-	// of the process executable. On Linux based systems, can be set to the
-	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
-	// of `GetProcessImageFileNameW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'otelcol'
-	ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
-	// ProcessExecutablePathKey is the attribute Key conforming to the
-	// "process.executable.path" semantic conventions. It represents the full
-	// path to the process executable. On Linux based systems, can be set to
-	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
-	// `GetProcessImageFileNameW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/usr/bin/cmd/otelcol'
-	ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
-	// ProcessExitCodeKey is the attribute Key conforming to the
-	// "process.exit.code" semantic conventions. It represents the exit code of
-	// the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 127
-	ProcessExitCodeKey = attribute.Key("process.exit.code")
-
-	// ProcessExitTimeKey is the attribute Key conforming to the
-	// "process.exit.time" semantic conventions. It represents the date and
-	// time the process exited, in ISO 8601 format.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2023-11-21T09:26:12.315Z'
-	ProcessExitTimeKey = attribute.Key("process.exit.time")
-
-	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
-	// "process.group_leader.pid" semantic conventions. It represents the PID
-	// of the process's group leader. This is also the process group ID (PGID)
-	// of the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 23
-	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
-
-	// ProcessInteractiveKey is the attribute Key conforming to the
-	// "process.interactive" semantic conventions. It represents whether the
-	// process is connected to an interactive shell.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	ProcessInteractiveKey = attribute.Key("process.interactive")
-
-	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
-	// semantic conventions. It represents the username of the user that owns
-	// the process.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'root'
-	ProcessOwnerKey = attribute.Key("process.owner")
-
-	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
-	// "process.paging.fault_type" semantic conventions.
It represents the type - // of page fault for this data point. Type `major` is for major/hard page - // faults, and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user - // ID (RUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the - // username of the real user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved - // user ID (SUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the - // username of the saved user. 
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'operator'
-	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
-
-	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
-	// "process.session_leader.pid" semantic conventions. It represents the PID
-	// of the process's session leader. This is also the session ID (SID) of
-	// the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 14
-	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
-
-	// ProcessUserIDKey is the attribute Key conforming to the
-	// "process.user.id" semantic conventions. It represents the effective user
-	// ID (EUID) of the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1001
-	ProcessUserIDKey = attribute.Key("process.user.id")
-
-	// ProcessUserNameKey is the attribute Key conforming to the
-	// "process.user.name" semantic conventions. It represents the username of
-	// the effective user of the process.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'root'
-	ProcessUserNameKey = attribute.Key("process.user.name")
-
-	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
-	// semantic conventions. It represents the virtual process identifier.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 12
-	// Note: The process ID within a PID namespace. This is not necessarily
-	// unique across all processes on the host but it is unique within the
-	// process namespace that the process exists within.
-	ProcessVpidKey = attribute.Key("process.vpid")
-)
-
-var (
-	// voluntary
-	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
-	// involuntary
-	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
-)
-
-var (
-	// major
-	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
-	// minor
-	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
-	return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
-	return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
-	return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCreationTime returns an attribute KeyValue conforming to the
-// "process.creation.time" semantic conventions. It represents the date and
-// time the process was created, in ISO 8601 format.
-func ProcessCreationTime(val string) attribute.KeyValue {
-	return ProcessCreationTimeKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
-	return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
-	return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessExitCode returns an attribute KeyValue conforming to the
-// "process.exit.code" semantic conventions. It represents the exit code of the
-// process.
-func ProcessExitCode(val int) attribute.KeyValue {
-	return ProcessExitCodeKey.Int(val)
-}
-
-// ProcessExitTime returns an attribute KeyValue conforming to the
-// "process.exit.time" semantic conventions. It represents the date and time
-// the process exited, in ISO 8601 format.
-func ProcessExitTime(val string) attribute.KeyValue {
-	return ProcessExitTimeKey.String(val)
-}
-
-// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
-// "process.group_leader.pid" semantic conventions. It represents the PID of
-// the process's group leader. This is also the process group ID (PGID) of the
-// process.
-func ProcessGroupLeaderPID(val int) attribute.KeyValue {
-	return ProcessGroupLeaderPIDKey.Int(val)
-}
-
-// ProcessInteractive returns an attribute KeyValue conforming to the
-// "process.interactive" semantic conventions. It represents whether the
-// process is connected to an interactive shell.
-func ProcessInteractive(val bool) attribute.KeyValue {
-	return ProcessInteractiveKey.Bool(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
-	return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
-	return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
-	return ProcessPIDKey.Int(val)
-}
-
-// ProcessRealUserID returns an attribute KeyValue conforming to the
-// "process.real_user.id" semantic conventions. It represents the real user ID
-// (RUID) of the process.
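// A minimal usage sketch for the process attributes above (illustrative
// names, as before):
//
//	attrs := []attribute.KeyValue{
//		semconv.ProcessPID(1234),
//		semconv.ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
//		semconv.ProcessOwner("root"),
//	}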
-func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user -// ID (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username -// of the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the -// "process.vpid" semantic conventions. It represents the virtual process -// identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// Attributes for process CPU -const ( - // ProcessCPUStateKey is the attribute Key conforming to the - // "process.cpu.state" semantic conventions. It represents the CPU state of - // the process. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessCPUStateKey = attribute.Key("process.cpu.state") -) - -var ( - // system - ProcessCPUStateSystem = ProcessCPUStateKey.String("system") - // user - ProcessCPUStateUser = ProcessCPUStateKey.String("user") - // wait - ProcessCPUStateWait = ProcessCPUStateKey.String("wait") -) - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. 
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
-
-	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
-	// semantic conventions. It MUST be calculated as two different counters
-	// starting from `1`, one for sent messages and one for received messages.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: This way we guarantee that the values will be consistent between
-	// different implementations.
-	RPCMessageIDKey = attribute.Key("rpc.message.id")
-
-	// RPCMessageTypeKey is the attribute Key conforming to the
-	// "rpc.message.type" semantic conventions. It represents whether this
-	// is a received or sent message.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageTypeKey = attribute.Key("rpc.message.type")
-
-	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
-	// "rpc.message.uncompressed_size" semantic conventions. It represents the
-	// uncompressed size of the message in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
-
-	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
-	// semantic conventions. It represents the name of the (logical) method
-	// being called, which must be equal to the $method part in the span name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'exampleMethod'
-	// Note: This is the logical name of the method from the RPC interface
-	// perspective, which can be different from the name of any implementing
-	// method/function. The `code.function` attribute may be used to store the
-	// latter (e.g., method actually executing the call on the server side, RPC
-	// client stub method on the client side).
-	RPCMethodKey = attribute.Key("rpc.method")
-
-	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
-	// semantic conventions. It represents the full (logical) name of the
-	// service being called, including its package name, if applicable.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myservice.EchoService'
-	// Note: This is the logical name of the service from the RPC interface
-	// perspective, which can be different from the name of any implementing
-	// class. The `code.namespace` attribute may be used to store the latter
-	// (despite the attribute name, it may include a class name; e.g., class
-	// with method actually executing the call on the server side, RPC client
-	// stub class on the client side).
-	RPCServiceKey = attribute.Key("rpc.service")
-
-	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
-	// semantic conventions. It represents a string identifying the remoting
-	// system. See below for a list of well-known identifiers.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // sent - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. -func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the -// "rpc.message.id" semantic conventions. It represents the mUST be calculated -// as two different counters starting from `1` one for sent messages and one -// for received message. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to -// the "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to - // distinguish instances of the same service that exist at the same time - // (e.g. instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random - // Version 1 or Version 4 [RFC - // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an - // inherent unique ID as the source of - // this value if stability is desirable. In that case, the ID SHOULD be - // used as source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the - // purposes of identifying a service instance is - // needed. 
Similar to what can be seen in the man page for the - // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) - // file, the underlying - // data, such as pod name and namespace, should be treated as confidential; - // it is the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we - // do not recommend using one identifier - // for all processes participating in the application. Instead, it's - // recommended that each division (e.g. a worker - // thread in unicorn) have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it - // can't unambiguously determine the - // service instance that is generating that telemetry. For instance, - // creating a UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container - // within that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, - // as they know the target address and - // port. - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fall back to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If - // `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance.
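The note above actually specifies an algorithm: when a stable inherent ID exists, derive `service.instance.id` as a Version 5 UUID with `4d63009a-8d0f-11ee-aad7-4c796ed8e320` as the namespace. A minimal sketch using `github.com/google/uuid`, whose `uuid.NewSHA1` constructor produces RFC 4122 Version 5 UUIDs (the pod-UID input is a made-up example):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// Namespace UUID reserved by the semantic conventions for deriving
// service.instance.id from an inherent unique ID.
var serviceInstanceIDNamespace = uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")

// serviceInstanceID derives a stable, opaque Version 5 UUID from an
// inherent unique ID such as a Kubernetes pod UID.
func serviceInstanceID(inherentID string) string {
	return uuid.NewSHA1(serviceInstanceIDNamespace, []byte(inherentID)).String()
}

func main() {
	// Hypothetical pod UID; the underlying data should be treated as
	// confidential and not exposed unless the user opts in.
	fmt.Println(serviceInstanceID("b2b1a4e7-0000-4e0e-9d49-example"))
}
```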
-func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // SignalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions.
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Describes System attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents the state of - // a network connection. A stateless - // protocol MUST NOT set this attribute. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process attributes -const ( - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. It represents the process - // state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -var ( - // running - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// Attributes for telemetry SDK. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`.
- // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. 
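In practice the `telemetry.sdk.*` attributes are filled in by the SDK itself, while the `service.*` constructors above are what you set by hand when building a resource. A hedged sketch, assuming this package is importable as `semconv` and using the OTel Go SDK's `resource` package:

```go
package resourcedemo

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed import path
)

// newResource builds a Resource describing this service. ServiceName is the
// one most backends require; ServiceNamespace and ServiceVersion follow the
// conventions documented above (example values from the docs).
func newResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceVersion("2.0.0"),
	)
}
```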
-func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client.
This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/time - // indicating when the client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions.
It represents the - // date/time indicating when the client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the server - // name indication (SNI), which tells the server the hostname to which the - // client is attempting to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions. It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions.
It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/time - // indicating when the server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/time indicating when the server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions.
It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/time -// indicating when the client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/time -// indicating when the client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the server name -// indication (SNI), which tells the server the hostname to which the client is -// attempting to connect.
-func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain.
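Most of these `tls.*` values map one-to-one onto fields of `crypto/tls.ConnectionState`, so an instrumentation can fill them in after the handshake completes. A hedged sketch covering only the fields the standard library exposes directly (the `semconv` import alias is an assumption):

```go
package tlsdemo

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed import path
)

// tlsAttributes derives tls.* attributes from a completed handshake.
func tlsAttributes(cs tls.ConnectionState) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.TLSEstablished(true),
		semconv.TLSResumed(cs.DidResume),                       // resumed from a prior negotiation
		semconv.TLSCipher(tls.CipherSuiteName(cs.CipherSuite)), // e.g. "TLS_AES_128_GCM_SHA256"
		semconv.TLSClientServerName(cs.ServerName),             // SNI sent by the client
		semconv.TLSNextProtocol(cs.NegotiatedProtocol),         // ALPN, e.g. "h2"
	}
}
```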
-func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the server. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/time -// indicating when the server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/time -// indicating when the server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" - // semantic conventions. It represents the domain extracted from the - // `url.full`, such as "opentelemetry.io".
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', - // '[1080:0:0:0:8:800:200C:417A]' - // Note: In some cases a URL may refer to an IP and/or port directly, - // without a domain name. In this case, the IP address would go to the - // domain field. If the URL contains a [literal IPv6 - // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by - // `[` and `]`, the `[` and `]` characters should also be captured in the - // domain field. - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from - // the `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: The file extension is only set if it exists, as not every URL has - // a file extension. When the file name has multiple extensions, such as - // `example.tar.gz`, only the last one should be captured (`gz`, not - // `tar.gz`). - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such a case the - // username and password SHOULD be redacted and the attribute's value - // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed). Sensitive content provided in `url.full` SHOULD be - // scrubbed when instrumentations can identify it. - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" - // semantic conventions. It represents the unmodified original URL as seen - // in the event source. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // 'search?q=OpenTelemetry' - // Note: In network monitoring, the observed URL may be a full URL, whereas - // in access logs, the URL is often just represented as a path. This field - // is meant to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such a case the - // password and username SHOULD NOT be redacted and the attribute's value - // SHOULD remain the same.
- URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered URL domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.com', 'foo.co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). For example, the registered domain for - // `foo.example.com` is `example.com`. Trying to approximate this by simply - // taking the last two labels will not work well for TLDs such as `co.uk`. - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name; it includes all of the names except the host name - // under the registered_domain. In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain - // contains all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'east', 'sub2.sub1' - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If - // the domain has multiple levels of subdomain, such as - // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, - // with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" - // semantic conventions. It represents the low-cardinality template of an - // [absolute path - // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/users/{id}', '/users/:id', '/users?id={id}' - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective - // top level domain (eTLD), also known as the domain suffix, which is the - // last part of the domain name. For example, the top level domain for - // example.com is `com`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com', 'co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the -// `url.full`, such as "opentelemetry.io". -func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the -// "url.extension" semantic conventions. It represents the file extension -// extracted from the `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the -// "url.original" semantic conventions. It represents the unmodified original -// URL as seen in the event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" -// semantic conventions. It represents the port extracted from the `url.full`. -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered URL domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions.
It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the -// "url.subdomain" semantic conventions. It represents the subdomain portion of -// a fully qualified domain name; it includes all of the names except the host -// name under the registered_domain. In a partially qualified domain, or if the -// qualification level of the full name cannot be determined, subdomain -// contains all of the names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the -// "url.template" semantic conventions. It represents the low-cardinality -// template of an [absolute path -// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, which is the last part -// of the domain name. For example, the top level domain for example.com is -// `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentNameKey is the attribute Key conforming to the - // "user_agent.name" semantic conventions. It represents the name of the - // user-agent extracted from original. Usually refers to the browser's - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Safari', 'YourApp' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's name - // from original string. In the case of using a user-agent for non-browser - // products, such as microservices with multiple names/versions inside the - // `user_agent.original`, the most significant name SHOULD be selected. In - // such a scenario it should align with `user_agent.version`. - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 - // grpc-java-okhttp/1.27.2' - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of - // the user-agent extracted from original. Usually refers to the browser's - // version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.1.2', '1.0.0' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's - // version from original string.
In the case of using a user-agent for - // non-browser products, such as microservices with multiple names/versions - // inside the `user_agent.original`, the most significant version SHOULD be - // selected. In such a scenario it should align with `user_agent.name` - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// The attributes used to describe the packaged software running the -// application code. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. 
-func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go deleted file mode 100644 index fcdb9f48596..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go +++ /dev/null @@ -1,1307 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - - // ContainerCPUTime is the metric conforming to the "container.cpu.time" - // semantic conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: Experimental - ContainerCPUTimeName = "container.cpu.time" - ContainerCPUTimeUnit = "s" - ContainerCPUTimeDescription = "Total CPU time consumed" - - // ContainerMemoryUsage is the metric conforming to the - // "container.memory.usage" semantic conventions. It represents the memory - // usage of the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerMemoryUsageName = "container.memory.usage" - ContainerMemoryUsageUnit = "By" - ContainerMemoryUsageDescription = "Memory usage of the container." - - // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic - // conventions. It represents the disk bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerDiskIoName = "container.disk.io" - ContainerDiskIoUnit = "By" - ContainerDiskIoDescription = "Disk bytes for the container." - - // ContainerNetworkIo is the metric conforming to the "container.network.io" - // semantic conventions. It represents the network bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerNetworkIoName = "container.network.io" - ContainerNetworkIoUnit = "By" - ContainerNetworkIoDescription = "Network bytes for the container." - - // DBClientOperationDuration is the metric conforming to the - // "db.client.operation.duration" semantic conventions. It represents the - // duration of database client operations. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientOperationDurationName = "db.client.operation.duration" - DBClientOperationDurationUnit = "s" - DBClientOperationDurationDescription = "Duration of database client operations." - - // DBClientConnectionCount is the metric conforming to the - // "db.client.connection.count" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionCountName = "db.client.connection.count" - DBClientConnectionCountUnit = "{connection}" - DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionIdleMax is the metric conforming to the - // "db.client.connection.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. 
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMaxName = "db.client.connection.idle.max" - DBClientConnectionIdleMaxUnit = "{connection}" - DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionIdleMin is the metric conforming to the - // "db.client.connection.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMinName = "db.client.connection.idle.min" - DBClientConnectionIdleMinUnit = "{connection}" - DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionMax is the metric conforming to the - // "db.client.connection.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionMaxName = "db.client.connection.max" - DBClientConnectionMaxUnit = "{connection}" - DBClientConnectionMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionPendingRequests is the metric conforming to the - // "db.client.connection.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" - DBClientConnectionPendingRequestsUnit = "{request}" - DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionTimeouts is the metric conforming to the - // "db.client.connection.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionTimeoutsName = "db.client.connection.timeouts" - DBClientConnectionTimeoutsUnit = "{timeout}" - DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - - // DBClientConnectionCreateTime is the metric conforming to the - // "db.client.connection.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionCreateTimeName = "db.client.connection.create_time" - DBClientConnectionCreateTimeUnit = "s" - DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" - - // DBClientConnectionWaitTime is the metric conforming to the - // "db.client.connection.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionWaitTimeName = "db.client.connection.wait_time" - DBClientConnectionWaitTimeUnit = "s" - DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" - - // DBClientConnectionUseTime is the metric conforming to the - // "db.client.connection.use_time" semantic conventions. 
It represents the time - between borrowing a connection and returning it to the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionUseTimeName = "db.client.connection.use_time" - DBClientConnectionUseTimeUnit = "s" - DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It is - // deprecated; use `db.client.connection.count` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." - - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It is - // deprecated; use `db.client.connection.idle.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." - - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It is - // deprecated; use `db.client.connection.idle.min` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." - - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It is - // deprecated; use `db.client.connection.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." - - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. It is - // deprecated; use `db.client.connection.pending_requests` instead. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." - - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. It is - // deprecated; use `db.client.connection.timeouts` instead. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." - - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions.
It is - deprecated; use `db.client.connection.create_time` instead. Note: the unit - also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It is - // deprecated; use `db.client.connection.wait_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It is - // deprecated; use `db.client.connection.use_time` instead. Note: the unit also - // changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." - - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." - - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Stable - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. - // Instrument: counter - // Unit: {exception} - // Stability: Stable - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." - - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease.
- // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" - AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" - AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingRequests is the metric conforming to the - // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the - // number of requests that tried to acquire a rate limiting lease. - // Instrument: counter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" - AspnetcoreRateLimitingRequestsUnit = "{request}" - AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. 
- // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: Stable - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." - - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. It represents the - // number of connections that are currently upgraded (WebSockets). - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" - KestrelUpgradedConnectionsUnit = "{connection}" - KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)." - - // KestrelTLSHandshakeDuration is the metric conforming to the - // "kestrel.tls_handshake.duration" semantic conventions. It represents the - // duration of TLS handshakes on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" - KestrelTLSHandshakeDurationUnit = "s" - KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." - - // KestrelActiveTLSHandshakes is the metric conforming to the - // "kestrel.active_tls_handshakes" semantic conventions. It represents the - // number of TLS handshakes that are currently in progress on the server. - // Instrument: updowncounter - // Unit: {handshake} - // Stability: Stable - KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" - KestrelActiveTLSHandshakesUnit = "{handshake}" - KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." - - // SignalrServerConnectionDuration is the metric conforming to the - // "signalr.server.connection.duration" semantic conventions. It represents the - // duration of connections on the server.
- // Instrument: histogram - // Unit: s - // Stability: Stable - SignalrServerConnectionDurationName = "signalr.server.connection.duration" - SignalrServerConnectionDurationUnit = "s" - SignalrServerConnectionDurationDescription = "The duration of connections on the server." - - // SignalrServerActiveConnections is the metric conforming to the - // "signalr.server.active_connections" semantic conventions. It represents the - // number of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - SignalrServerActiveConnectionsName = "signalr.server.active_connections" - SignalrServerActiveConnectionsUnit = "{connection}" - SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" - // semantic conventions. It measures the duration of the - // function's logic execution. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInvokeDurationName = "faas.invoke_duration" - FaaSInvokeDurationUnit = "s" - FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" - - // FaaSInitDuration is the metric conforming to the "faas.init_duration" - // semantic conventions. It measures the duration of the - // function's initialization, such as a cold start. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInitDurationName = "faas.init_duration" - FaaSInitDurationUnit = "s" - FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" - - // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic - // conventions. It represents the number of invocation cold starts. - // Instrument: counter - // Unit: {coldstart} - // Stability: Experimental - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: Experimental - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation.
- // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." 
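For orientation, these generated triples (name, unit, description) are meant to be passed straight into instrument constructors. A minimal sketch of that pattern with the otel metric API, assuming the package is imported as `semconv`; the scope name "example" and the helper function are illustrative, not part of this diff:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// recordRequestDuration is illustrative: it creates the histogram described by
// the generated constants and records one measurement.
func recordRequestDuration(ctx context.Context, seconds float64) error {
	meter := otel.Meter("example") // assumed instrumentation scope name
	hist, err := meter.Float64Histogram(
		semconv.HTTPServerRequestDurationName, // "http.server.request.duration"
		metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
		metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
	)
	if err != nil {
		return err
	}
	hist.Record(ctx, seconds)
	return nil
}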
- - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. - // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. 
- // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." - - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryLimitName = "jvm.memory.limit" - JvmMemoryLimitUnit = "By" - JvmMemoryLimitDescription = "Measure of max obtainable memory." - - // JvmMemoryUsedAfterLastGc is the metric conforming to the - // "jvm.memory.used_after_last_gc" semantic conventions. It represents the - // measure of memory used, as measured after the most recent garbage collection - // event on this pool. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" - JvmMemoryUsedAfterLastGcUnit = "By" - JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." - - // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic - // conventions. It represents the duration of JVM garbage collection actions. - // Instrument: histogram - // Unit: s - // Stability: Stable - JvmGcDurationName = "jvm.gc.duration" - JvmGcDurationUnit = "s" - JvmGcDurationDescription = "Duration of JVM garbage collection actions." - - // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic - // conventions. It represents the number of executing platform threads. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Stable - JvmThreadCountName = "jvm.thread.count" - JvmThreadCountUnit = "{thread}" - JvmThreadCountDescription = "Number of executing platform threads." - - // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic - // conventions. It represents the number of classes loaded since JVM start. 
- // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassLoadedName = "jvm.class.loaded" - JvmClassLoadedUnit = "{class}" - JvmClassLoadedDescription = "Number of classes loaded since JVM start." - - // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" - // semantic conventions. It represents the number of classes unloaded since JVM - // start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassUnloadedName = "jvm.class.unloaded" - JvmClassUnloadedUnit = "{class}" - JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." - - // JvmClassCount is the metric conforming to the "jvm.class.count" semantic - // conventions. It represents the number of classes currently loaded. - // Instrument: updowncounter - // Unit: {class} - // Stability: Stable - JvmClassCountName = "jvm.class.count" - JvmClassCountUnit = "{class}" - JvmClassCountDescription = "Number of classes currently loaded." - - // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic - // conventions. It represents the number of processors available to the Java - // virtual machine. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Stable - JvmCPUCountName = "jvm.cpu.count" - JvmCPUCountUnit = "{cpu}" - JvmCPUCountDescription = "Number of processors available to the Java virtual machine." - - // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic - // conventions. It represents the CPU time used by the process as reported by - // the JVM. - // Instrument: counter - // Unit: s - // Stability: Stable - JvmCPUTimeName = "jvm.cpu.time" - JvmCPUTimeUnit = "s" - JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." - - // JvmCPURecentUtilization is the metric conforming to the - // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent - // CPU utilization for the process as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Stable - JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" - JvmCPURecentUtilizationUnit = "1" - JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." - - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It - // measures the duration of the publish operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Measures the duration of publish operation." - - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It - // measures the duration of the receive operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Measures the duration of receive operation." - - // MessagingProcessDuration is the metric conforming to the - // "messaging.process.duration" semantic conventions. It - // measures the duration of the process operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingProcessDurationName = "messaging.process.duration" - MessagingProcessDurationUnit = "s" - MessagingProcessDurationDescription = "Measures the duration of process operation."
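The updowncounter-style constants follow the same pattern with Int64UpDownCounter; another sketch under the same assumptions (scope name and function are illustrative):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// trackConnection is illustrative: updowncounter metrics such as
// "http.client.open_connections" are incremented on open and decremented on close.
func trackConnection(ctx context.Context) error {
	meter := otel.Meter("example") // assumed instrumentation scope name
	conns, err := meter.Int64UpDownCounter(
		semconv.HTTPClientOpenConnectionsName,
		metric.WithUnit(semconv.HTTPClientOpenConnectionsUnit),
		metric.WithDescription(semconv.HTTPClientOpenConnectionsDescription),
	)
	if err != nil {
		return err
	}
	conns.Add(ctx, 1)        // connection opened
	defer conns.Add(ctx, -1) // connection closed
	return nil
}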
- - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It - // measures the number of published messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Measures the number of published messages." - - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It - // measures the number of received messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Measures the number of received messages." - - // MessagingProcessMessages is the metric conforming to the - // "messaging.process.messages" semantic conventions. It - // measures the number of processed messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingProcessMessagesName = "messaging.process.messages" - MessagingProcessMessagesUnit = "{message}" - MessagingProcessMessagesDescription = "Measures the number of processed messages." - - // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic - // conventions. It represents the total CPU seconds broken down by different - // states. - // Instrument: counter - // Unit: s - // Stability: Experimental - ProcessCPUTimeName = "process.cpu.time" - ProcessCPUTimeUnit = "s" - ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." - - // ProcessCPUUtilization is the metric conforming to the - // "process.cpu.utilization" semantic conventions. It represents the difference - // in process.cpu.time since the last measurement, divided by the elapsed time - // and number of CPUs available to the process. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - ProcessCPUUtilizationName = "process.cpu.utilization" - ProcessCPUUtilizationUnit = "1" - ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." - - // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" - // semantic conventions. It represents the amount of physical memory in use. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryUsageName = "process.memory.usage" - ProcessMemoryUsageUnit = "By" - ProcessMemoryUsageDescription = "The amount of physical memory in use." - - // ProcessMemoryVirtual is the metric conforming to the - // "process.memory.virtual" semantic conventions. It represents the amount of - // committed virtual memory. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryVirtualName = "process.memory.virtual" - ProcessMemoryVirtualUnit = "By" - ProcessMemoryVirtualDescription = "The amount of committed virtual memory." - - // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic - // conventions. It represents the disk bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessDiskIoName = "process.disk.io" - ProcessDiskIoUnit = "By" - ProcessDiskIoDescription = "Disk bytes transferred."
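For completeness, the attribute constructors removed earlier in this diff (URLFull, URLScheme, UserAgentOriginal, and the rest) attach to spans rather than metrics. A minimal sketch reusing example values from the doc comments above; the tracer scope and span name are assumptions:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// annotateSpan is illustrative: each constructor returns an attribute.KeyValue
// keyed by the corresponding semantic convention.
func annotateSpan(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "HTTP GET") // assumed names
	defer span.End()
	span.SetAttributes(
		semconv.URLFull("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
		semconv.URLScheme("https"),
		semconv.UserAgentOriginal("YourApp/1.0.0 grpc-java-okhttp/1.27.2"),
	)
}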
- - // ProcessNetworkIo is the metric conforming to the "process.network.io" - // semantic conventions. It represents the network bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessNetworkIoName = "process.network.io" - ProcessNetworkIoUnit = "By" - ProcessNetworkIoDescription = "Network bytes transferred." - - // ProcessThreadCount is the metric conforming to the "process.thread.count" - // semantic conventions. It represents the process thread count. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Experimental - ProcessThreadCountName = "process.thread.count" - ProcessThreadCountUnit = "{thread}" - ProcessThreadCountDescription = "Process threads count." - - // ProcessOpenFileDescriptorCount is the metric conforming to the - // "process.open_file_descriptor.count" semantic conventions. It represents the - // number of file descriptors in use by the process. - // Instrument: updowncounter - // Unit: {count} - // Stability: Experimental - ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" - ProcessOpenFileDescriptorCountUnit = "{count}" - ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." - - // ProcessContextSwitches is the metric conforming to the - // "process.context_switches" semantic conventions. It represents the number of - // times the process has been context switched. - // Instrument: counter - // Unit: {count} - // Stability: Experimental - ProcessContextSwitchesName = "process.context_switches" - ProcessContextSwitchesUnit = "{count}" - ProcessContextSwitchesDescription = "Number of times the process has been context switched." - - // ProcessPagingFaults is the metric conforming to the "process.paging.faults" - // semantic conventions. It represents the number of page faults the process - // has made. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - ProcessPagingFaultsName = "process.paging.faults" - ProcessPagingFaultsUnit = "{fault}" - ProcessPagingFaultsDescription = "Number of page faults the process has made." - - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the duration of inbound RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the size of - // RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the size of - // RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the duration of outbound RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the size of - // RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the size of - // RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. It represents the - // number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - - // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the current frequency of the CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: Experimental - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the number - // of actual physical processor cores on the hardware. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the number - // of logical (virtual) processor cores created by the operating system to - // manage multitasking. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the memory in use, by state. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." - - // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." - - // SystemMemoryShared is the metric conforming to the "system.memory.shared" - // semantic conventions. It represents the shared memory used (mostly by - // tmpfs). - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemorySharedName = "system.memory.shared" - SystemMemorySharedUnit = "By" - SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." - - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. It represents the sum of - // the time each operation took to complete. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - - // SystemProcessCount is the metric conforming to the "system.process.count" - // semantic conventions. It represents the total number of processes in each - // state. - // Instrument: updowncounter - // Unit: {process} - // Stability: Experimental - SystemProcessCountName = "system.process.count" - SystemProcessCountUnit = "{process}" - SystemProcessCountDescription = "Total number of processes in each state" - - // SystemProcessCreated is the metric conforming to the - // "system.process.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. 
- // Instrument: counter - // Unit: {process} - // Stability: Experimental - SystemProcessCreatedName = "system.process.created" - SystemProcessCreatedUnit = "{process}" - SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" - - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/MIGRATION.md new file mode 100644 index 00000000000..fc98b022e64 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/MIGRATION.md @@ -0,0 +1,29 @@ + +# Migration from v1.37.0 to v1.38.0 + +The `go.opentelemetry.io/otel/semconv/v1.38.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.37.0` with the following exceptions. + +## Removed + +The following declarations have been removed. +Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. + +If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. +If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. + +- `ProcessPagingFaultTypeKey` +- `ProcessPagingFaultTypeMajor` +- `ProcessPagingFaultTypeMinor` +- `SystemCPULogicalNumber` +- `SystemCPULogicalNumberKey` +- `SystemPagingTypeKey` +- `SystemPagingTypeMajor` +- `SystemPagingTypeMinor` +- `SystemProcessStatusDefunct` +- `SystemProcessStatusKey` +- `SystemProcessStatusRunning` +- `SystemProcessStatusSleeping` +- `SystemProcessStatusStopped` + +[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions +[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/README.md new file mode 100644 index 00000000000..dfe2efc3ce5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.38.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.38.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.38.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/attribute_group.go new file mode 100644 index 00000000000..56346a21972 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/attribute_group.go @@ -0,0 +1,15981 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.38.0" + +import "go.opentelemetry.io/otel/attribute" + +// Namespace: android +const ( + // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" + // semantic conventions. 
It represents the state of + // the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "created" + // Note: The Android lifecycle states are defined in + // [Activity lifecycle callbacks], from which the `OS identifiers` are + // derived. + // + // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc + AndroidAppStateKey = attribute.Key("android.app.state") + + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely + // identifies the framework API revision offered by a version (`os.version`) of + // the Android operating system. More information can be found in the + // [Android API levels documentation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "33", "32" + // + // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely identifies the +// framework API revision offered by a version (`os.version`) of the Android +// operating system. More information can be found in the +// [Android API levels documentation]. +// +// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time. + // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. + // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0", + // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123" + AppBuildIDKey = attribute.Key("app.build_id") + + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. + // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppJankFrameCountKey is the attribute Key conforming to the + // "app.jank.frame_count" semantic conventions. It represents a number of frame + // renders that experienced jank. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 9, 42 + // Note: Depending on platform limitations, the value provided MAY be an + // approximation. + AppJankFrameCountKey = attribute.Key("app.jank.frame_count") + + // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period" + // semantic conventions. It represents the time period, in seconds, for which + // this jank is being reported. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 5.0, 10.24 + AppJankPeriodKey = attribute.Key("app.jank.period") + + // AppJankThresholdKey is the attribute Key conforming to the + // "app.jank.threshold" semantic conventions. It represents the minimum + // rendering threshold for this jank, in seconds. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.016, 0.7, 1.024 + AppJankThresholdKey = attribute.Key("app.jank.threshold") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels.
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppScreenIDKey is the attribute Key conforming to the "app.screen.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this screen from other screens in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", + // "com.example.app.MainActivity", "com.example.shop.ProductDetailFragment", + // "MyApp.ProfileView", "MyApp.ProfileViewController" + // Note: A screen represents only the part of the device display drawn by the + // app. It typically contains multiple widgets or UI components and is larger in + // scope than individual widgets. Multiple screens can coexist on the same + // display simultaneously (e.g., split view on tablets). + AppScreenIDKey = attribute.Key("app.screen.id") + + // AppScreenNameKey is the attribute Key conforming to the "app.screen.name" + // semantic conventions. It represents the name of an application screen. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MainActivity", "ProductDetailFragment", "ProfileView", + // "ProfileViewController" + // Note: A screen represents only the part of the device display drawn by the + // app. It typically contains multiple widgets or UI components and is larger in + // scope than individual widgets. Multiple screens can coexist on the same + // display simultaneously (e.g., split view on tablets). + AppScreenNameKey = attribute.Key("app.screen.name") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppBuildID returns an attribute KeyValue conforming to the "app.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the application. +func AppBuildID(val string) attribute.KeyValue { + return AppBuildIDKey.String(val) +} + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. 
It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppJankFrameCount returns an attribute KeyValue conforming to the +// "app.jank.frame_count" semantic conventions. It represents a number of frame +// renders that experienced jank. +func AppJankFrameCount(val int) attribute.KeyValue { + return AppJankFrameCountKey.Int(val) +} + +// AppJankPeriod returns an attribute KeyValue conforming to the +// "app.jank.period" semantic conventions. It represents the time period, in +// seconds, for which this jank is being reported. +func AppJankPeriod(val float64) attribute.KeyValue { + return AppJankPeriodKey.Float64(val) +} + +// AppJankThreshold returns an attribute KeyValue conforming to the +// "app.jank.threshold" semantic conventions. It represents the minimum rendering +// threshold for this jank, in seconds. +func AppJankThreshold(val float64) attribute.KeyValue { + return AppJankThresholdKey.Float64(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppScreenID returns an attribute KeyValue conforming to the "app.screen.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this screen from other screens in the same application. +func AppScreenID(val string) attribute.KeyValue { + return AppScreenIDKey.String(val) +} + +// AppScreenName returns an attribute KeyValue conforming to the +// "app.screen.name" semantic conventions. It represents the name of an +// application screen. +func AppScreenName(val string) attribute.KeyValue { + return AppScreenNameKey.String(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. +func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)] of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human-readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact], which provides a standard way to identify and locate the + // packaged artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)] of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. It represents the human-readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact], which provides a standard way to identify and locate the +// packaged artifact. +// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the value + // of the `ConsistentRead` request parameter.
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. 
It represents the value of + // the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+	AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+	// AWSKinesisStreamNameKey is the attribute Key conforming to the
+	// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+	// AWS Kinesis [stream] the request refers to. Corresponds to the
+	// `--stream-name` parameter of the Kinesis [describe-stream] operation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "some-stream-name"
+	//
+	// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+	// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+	AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+	// ARN as provided on the `Context` passed to the function (the
+	// `Lambda-Runtime-Invoked-Function-Arn` header on the
+	// `/runtime/invocation/next` endpoint, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+	// Note: This may be different from `cloud.resource_id` if an alias is involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+	// AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+	// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+	// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+	// function. Its contents are read by Lambda and used to trigger a function.
+	// This isn't available in the lambda execution context or the lambda runtime
+	// environment. It is populated by the AWS SDK for each language
+	// when that UUID is present. Some of these operations are
+	// Create/Delete/Get/List/Update EventSourceMapping.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+	//
+	// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+	AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+	// Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+	// Note: See the [log group ARN format documentation].
+	//
+	// [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+	// AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/aws/lambda/my-function", "opentelemetry-service"
+	// Note: Multiple log groups must be supported for cases like multi-container
+	// applications, where a single application has sidecar containers, and each
+	// writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+	// AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+	// Note: See the [log stream ARN format documentation]. One log group can
+	// contain several log streams, so these ARNs necessarily identify both a log
+	// group and a log stream.
+	//
+	// [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+	// AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+	// AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+	// semantic conventions. It represents the AWS request ID as returned in the
+	// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+	// `x-amz-request-id`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ"
+	AWSRequestIDKey = attribute.Key("aws.request_id")
+
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request refers to.
+	// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "some-bucket-name"
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	//
+	// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source object
+	// (in the form `bucket`/`key`) for the copy operation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "someFile.yml"
+	// Note: The `copy_source` attribute applies to S3 copy operations and
+	// corresponds to the `--copy-source` parameter
+	// of the [copy-object operation within the S3 API].
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+	// This applies in particular to the following operations:
+	//
+	// - [abort-multipart-upload]
+	// - [complete-multipart-upload]
+	// - [list-parts]
+	// - [upload-part]
+	// - [upload-part-copy]
+	//
+	//
+	// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+	// [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+	// [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+	// [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+	// [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+	// [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+	// AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+	// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+	// of the secret stored in AWS Secrets Manager.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+	AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+	// AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+	// semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+	// SNS [topic] is a logical access point that acts as a communication channel.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+	//
+	// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+	AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+	// AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+	// semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+	// unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+	// used to access the queue and perform actions on it.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+	AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+	// AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+	// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+	// of the AWS Step Functions Activity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+	AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+	// AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+	// "aws.step_functions.state_machine.arn" semantic conventions. It represents
+	// the ARN of the AWS Step Functions State Machine.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+	AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
+
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions.
It represents the unique +// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
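+//
+// A minimal usage sketch (illustrative, not generated from the registry; the
+// span variable is assumed to come from the go.opentelemetry.io/otel/trace
+// API, and the JSON value is a hypothetical serialized update):
+//
+//	span.SetAttributes(AWSDynamoDBGlobalSecondaryIndexUpdates(
+//		`{"Create": {"IndexName": "my-index"}}`,
+//	))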
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. 
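+//
+// For example, recording the `ScannedCount` from a Query response (span is an
+// illustrative trace.Span, not part of this package):
+//
+//	span.SetAttributes(AWSDynamoDBScannedCount(50))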
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. +// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. 
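+//
+// A sketch of deriving the ID from a task ARN before recording it
+// (illustrative; assumes the standard library "strings" package and a span in
+// scope):
+//
+//	arn := "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b"
+//	span.SetAttributes(AWSECSTaskID(arn[strings.LastIndex(arn, "/")+1:]))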
+func AWSECSTaskID(val string) attribute.KeyValue {
+	return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+	return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+	return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+// function. Its contents are read by Lambda and used to trigger a function.
+// This isn't available in the lambda execution context or the lambda runtime
+// environment. It is populated by the AWS SDK for each language
+// when that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+	return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
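+//
+// A sketch of attaching the attribute to a resource rather than a span
+// (illustrative; assumes go.opentelemetry.io/otel/sdk/resource and the
+// SchemaURL constant that semconv packages conventionally export):
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"))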
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+// `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+	return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+	return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+	return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+	return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+	return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+	return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+	return AWSS3UploadIDKey.String(val)
+}
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the secret stored in AWS Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+	return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions.
It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel. +// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // Amazon EC2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // Amazon Fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/resources/subscriptions/list-locations + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") + + // AzureResourceProviderNamespaceKey is the attribute Key conforming to the + // "azure.resource_provider.namespace" semantic conventions. It represents the + // [Azure Resource Provider Namespace] as recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") + + // AzureServiceRequestIDKey is the attribute Key conforming to the + // "azure.service.request.id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. 
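+//
+// For example, after a cross-regional operation (span is an illustrative
+// trace.Span, not part of this package):
+//
+//	span.SetAttributes(AzureCosmosDBOperationContactedRegions(
+//		"North Central US", "Australia East"))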
+func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. +func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the +// "azure.resource_provider.namespace" semantic conventions. It represents the +// [Azure Resource Provider Namespace] as recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzureResourceProviderNamespace(val string) attribute.KeyValue { + return AzureResourceProviderNamespaceKey.String(val) +} + +// AzureServiceRequestID returns an attribute KeyValue conforming to the +// "azure.service.request.id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. +func AzureServiceRequestID(val string) attribute.KeyValue { + return AzureServiceRequestIDKey.String(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. + // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // Strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // Bounded Staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // Session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // Eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // Consistent Prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. 
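+//
+// For example, propagating the `navigator.userAgentData.mobile` value reported
+// by a browser client (illustrative; span is assumed to be in scope):
+//
+//	span.SetAttributes(BrowserMobile(true))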
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
+
+// Namespace: cassandra
+const (
+	// CassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "cassandra.consistency.level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from [CQL].
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+	CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+	// CassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "cassandra.coordinator.dc" semantic conventions. It represents the data
+	// center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: us-west-2
+	CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+	// CassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+	// coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+	CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+	// CassandraPageSizeKey is the attribute Key conforming to the
+	// "cassandra.page.size" semantic conventions. It represents the fetch size used
+	// for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 5000
+	CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+	// CassandraQueryIdempotentKey is the attribute Key conforming to the
+	// "cassandra.query.idempotent" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+	// CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+	// "cassandra.speculative_execution.count" semantic conventions. It represents
+	// the number of times a query was speculatively executed. Not set or `0` if the
+	// query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 2
+	CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+	return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
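+//
+// For example (span is an illustrative trace.Span, not part of this package):
+//
+//	span.SetAttributes(CassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"))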
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+	return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+	return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+	return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+	// All
+	// Stability: development
+	CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+	// Each Quorum
+	// Stability: development
+	CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+	// Quorum
+	// Stability: development
+	CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+	// Local Quorum
+	// Stability: development
+	CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+	// One
+	// Stability: development
+	CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+	// Two
+	// Stability: development
+	CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+	// Three
+	// Stability: development
+	CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+	// Local One
+	// Stability: development
+	CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+	// Any
+	// Stability: development
+	CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+	// Serial
+	// Stability: development
+	CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+	// Local Serial
+	// Stability: development
+	CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Namespace: cicd
+const (
+	// CICDPipelineActionNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+	// action a pipeline run is performing.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "BUILD", "RUN", "SYNC"
+	CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+	// CICDPipelineNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.name" semantic conventions. It represents the human readable
+	// name of the pipeline within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Build and Test", "Lint", "Deploy Go Project",
+	// "deploy_to_environment"
+	CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+	// CICDPipelineResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.result" semantic conventions. It represents the result of a
+	// pipeline run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+	// CICDPipelineRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+	// identifier of a pipeline run within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "120912"
+	CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+	// CICDPipelineRunStateKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.state" semantic conventions. It represents the state of
+	// the pipeline run; a run goes through these states during its lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pending", "executing", "finalizing"
+	CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+	// CICDPipelineRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+	// the pipeline run, providing the complete address in order to locate and
+	// identify the pipeline run.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+	//
+	// [URL]: https://wikipedia.org/wiki/URL
+	CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+	// CICDPipelineTaskNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.name" semantic conventions. It represents the human
+	// readable name of a task within a pipeline. Task here most closely aligns with
+	// a [computing process] in a pipeline. Other terms for tasks include commands,
+	// steps, and procedures.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+	//
+	// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+	CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+	// CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+	// identifier of a task run within a pipeline.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "12097"
+	CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+	// CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.result" semantic conventions. It represents the
+	// result of a task run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+	// CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+	// [URL] of the pipeline task run, providing the complete address in order to
+	// locate and identify the pipeline task run.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. +func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, eg. 
due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the pipeline run. + // Stability: development + CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") + // The pipeline run failed due to an error in the CICD system, eg. due to the + // worker being killed. + // Stability: development + CICDPipelineResultError = CICDPipelineResultKey.String("error") + // A timeout caused the pipeline run to be interrupted. + // Stability: development + CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") + // The pipeline run was cancelled, eg. by a user manually cancelling the + // pipeline run. + // Stability: development + CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") + // The pipeline run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") +) + +// Enum values for cicd.pipeline.run.state +var ( + // The run pending state spans from the event triggering the pipeline run until + // the execution of the run starts (eg. time spent in a queue, provisioning + // agents, creating run resources). + // + // Stability: development + CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") + // The executing state spans the execution of any run tasks (eg. build, test). + // Stability: development + CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") + // The finalizing state spans from when the run has finished executing (eg. + // cleanup of run resources). + // Stability: development + CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") +) + +// Enum values for cicd.pipeline.task.run.result +var ( + // The task run finished successfully. + // Stability: development + CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") + // The task run did not finish successfully, eg. due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the task run. + // Stability: development + CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") + // The task run failed due to an error in the CICD system, eg. due to the worker + // being killed. + // Stability: development + CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") + // A timeout caused the task run to be interrupted. + // Stability: development + CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") + // The task run was cancelled, eg. by a user manually cancelling the task run. + // Stability: development + CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") + // The task run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") +) + +// Enum values for cicd.pipeline.task.type +var ( + // build + // Stability: development + CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") + // test + // Stability: development + CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") + // deploy + // Stability: development + CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") +) + +// Enum values for cicd.worker.state +var ( + // The worker is not performing work for the CICD system. 
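+
+// Illustrative usage only (a sketch, not part of the upstream conventions):
+// assuming this package is imported as semconv and a trace.Span named span is
+// in scope, pipeline run attributes can be attached like so:
+//
+//	span.SetAttributes(
+//		semconv.CICDPipelineRunID("120912"),
+//		semconv.CICDPipelineTaskName("go-test"),
+//		semconv.CICDPipelineResultSuccess,
+//	)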
+
+// Namespace: client
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix domain
+	// socket name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+	// Note: When observed from the server side, and when communicating through an
+	// intermediary, `client.address` SHOULD represent the client address behind any
+	// intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port" semantic
+	// conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through an
+	// intermediary, `client.port` SHOULD represent the client port behind any
+	// intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
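+
+// Illustrative usage only (a sketch, not part of the upstream conventions):
+// assuming this package is imported as semconv, server-side instrumentation
+// could record the observed peer as:
+//
+//	span.SetAttributes(
+//		semconv.ClientAddress("10.1.2.80"),
+//		semconv.ClientPort(65123),
+//	)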
+
+// Namespace: cloud
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+	// semantic conventions. It represents the cloud account ID the resource is
+	// assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "111111111111", "opentelemetry"
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often have
+	// multiple, isolated locations known as zones to increase availability.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-east-1c"
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+	// conventions. It represents the geographical region within a cloud provider.
+	// When associated with a resource, this attribute specifies the region where
+	// the resource operates. When calling services or APIs deployed on a cloud,
+	// this attribute identifies the region where the called destination is
+	// deployed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-central1", "us-east-1"
+	// Note: Refer to your provider's docs to see the available regions, for example
+	// [Alibaba Cloud regions], [AWS regions], [Azure regions],
+	// [Google Cloud regions], or [Tencent Cloud regions].
+	//
+	// [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+	// [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+	// [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+	// [Google Cloud regions]: https://cloud.google.com/about/locations
+	// [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+	// semantic conventions. It represents the cloud provider-specific native
+	// identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+	// [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+	// "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+	// "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>"
+	// Note: On some cloud providers, it may not be possible to determine the full
+	// ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud provider.
+	// The following well-known definitions MUST be used if you set this attribute
+	// and they apply:
+	//
+	//   - **AWS Lambda:** The function [ARN].
+	//     Take care not to use the "invoked ARN" directly but replace any
+	//     [alias suffix] with the resolved function version, as the same runtime
+	//     instance may be invocable with multiple different aliases.
+	//   - **GCP:** The [URI of the resource]
+	//   - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+	//     *not* the function app, having the form
+	//     `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	//     This means that a span attribute MUST be used, as an Azure function app
+	//     can host multiple functions that would usually share a TracerProvider.
+	//
+	// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+	// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+	// [full resource name]: https://google.aip.dev/122#full-resource-names
+	// [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+	// [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+	// [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
+//
+// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+// [full resource name]: https://google.aip.dev/122#full-resource-names
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// Enum values for cloud.platform
+var (
+	// Alibaba Cloud Elastic Compute Service
+	// Stability: development
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	// Stability: development
+	CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	// Stability: development
+	CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	// Stability: development
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	// Stability: development
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	// Stability: development
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	// Stability: development
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	// Stability: development
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	// Stability: development
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	// Stability: development
+	CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	// Stability: development
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm")
+	// Azure Container Apps
+	// Stability: development
+	CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps")
+	// Azure Container Instances
+	// Stability: development
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances")
+	// Azure Kubernetes Service
+	// Stability: development
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks")
+	// Azure Functions
+	// Stability: development
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions")
+	// Azure App Service
+	// Stability: development
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service")
+	// Azure Red Hat OpenShift
+	// Stability: development
+	CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift")
+	// Google Bare Metal Solution (BMS)
+	// Stability: development
+	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+	// Google Cloud Compute Engine (GCE)
+	// Stability: development
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	// Stability: development
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	// Stability: development
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	// Stability: development
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	// Stability: development
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	// Stability: development
+	CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	// Stability: development
+	CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Compute on Oracle Cloud Infrastructure (OCI)
+	// Stability: development
+	CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+	// Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+	// Stability: development
+	CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	// Stability: development
+	CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	// Stability: development
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	// Stability: development
+	CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Enum values for cloud.provider
+var (
+	// Alibaba Cloud
+	// Stability: development
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	// Stability: development
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	// Stability: development
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	// Stability: development
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Heroku Platform as a Service
+	// Stability: development
+	CloudProviderHeroku = CloudProviderKey.String("heroku")
+	// IBM Cloud
+	// Stability: development
+	CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+	// Oracle Cloud Infrastructure (OCI)
+	// Stability: development
+	CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+	// Tencent Cloud
+	// Stability: development
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
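+
+// Illustrative usage only (a sketch, not part of the upstream conventions):
+// assuming this package is imported as semconv, cloud attributes are typically
+// attached to a resource (resource.NewWithAttributes is from
+// go.opentelemetry.io/otel/sdk/resource; SchemaURL is defined elsewhere in
+// this package):
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.CloudProviderAWS,
+//		semconv.CloudPlatformAWSLambda,
+//		semconv.CloudRegion("us-east-1"),
+//		semconv.CloudAccountID("111111111111"),
+//	)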
+
+// Namespace: cloudevents
+const (
+	// CloudEventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the [event_id],
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+	//
+	// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+	CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudEventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the [source],
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+	// "my-service"
+	//
+	// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+	CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents specification] which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "1.0"
+	//
+	// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+	CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudEventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+	// of the event in the context of the event producer (identified by source).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "mynewfile.jpg"
+	//
+	// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+	CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudEventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type], which contains a value describing the type of event related
+	// to the originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+	//
+	// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+	CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id],
+// which uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+	return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source],
+// which identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+	return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+	return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type], which contains a value describing the type of event related to
+// the originating occurrence.
+//
+// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+func CloudEventsEventType(val string) attribute.KeyValue {
+	return CloudEventsEventTypeKey.String(val)
+}
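+
+// Illustrative usage only (a sketch, not part of the upstream conventions):
+// assuming this package is imported as semconv, a consumer span for a
+// CloudEvents message could be annotated as:
+//
+//	span.SetAttributes(
+//		semconv.CloudEventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudEventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudEventsEventType("com.example.object.deleted.v2"),
+//	)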
+
+// Namespace: cloudfoundry
+const (
+	// CloudFoundryAppIDKey is the attribute Key conforming to the
+	// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+	// application.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.application_id`. This is the same value as
+	// reported by `cf app --guid`.
+	CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id")
+
+	// CloudFoundryAppInstanceIDKey is the attribute Key conforming to the
+	// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+	// of the application instance. 0 when just one instance is active.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0", "1"
+	// Note: CloudFoundry defines the `instance_id` in the
+	// [Loggregator v2 envelope].
+	// It is used for logs and metrics emitted by CloudFoundry. It is
+	// supposed to contain the application instance index for applications
+	// deployed on the runtime.
+	//
+	// Application instrumentation should use the value from environment
+	// variable `CF_INSTANCE_INDEX`.
+	//
+	// [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+	CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id")
+
+	// CloudFoundryAppNameKey is the attribute Key conforming to the
+	// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+	// application.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-app-name"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.application_name`. This is the same value
+	// as reported by `cf apps`.
+	CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name")
+
+	// CloudFoundryOrgIDKey is the attribute Key conforming to the
+	// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+	// CloudFoundry org the application is running in.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.org_id`. This is the same value as
+	// reported by `cf org --guid`.
+	CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id")
+
+	// CloudFoundryOrgNameKey is the attribute Key conforming to the
+	// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+	// CloudFoundry organization the app is running in.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-org-name"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.org_name`. This is the same value as
+	// reported by `cf orgs`.
+	CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name")
+
+	// CloudFoundryProcessIDKey is the attribute Key conforming to the
+	// "cloudfoundry.process.id" semantic conventions. It represents the UID
+	// identifying the process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to
+	// `VCAP_APPLICATION.app_id` for applications deployed to the runtime.
+	// For system components, this could be the actual PID.
+	CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id")
+
+	// CloudFoundryProcessTypeKey is the attribute Key conforming to the
+	// "cloudfoundry.process.type" semantic conventions. It represents the type of
+	// process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "web"
+	// Note: CloudFoundry applications can consist of multiple jobs. Usually the
+	// main process will be of type `web`. There can be additional background
+	// tasks or side-cars with different process types.
+	CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type")
+
+	// CloudFoundrySpaceIDKey is the attribute Key conforming to the
+	// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+	// CloudFoundry space the application is running in.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.space_id`. This is the same value as
+	// reported by `cf space --guid`.
+	CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id")
+
+	// CloudFoundrySpaceNameKey is the attribute Key conforming to the
+	// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+	// CloudFoundry space the application is running in.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-space-name"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.space_name`. This is the same value as
+	// reported by `cf spaces`.
+	CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name")
+
+	// CloudFoundrySystemIDKey is the attribute Key conforming to the
+	// "cloudfoundry.system.id" semantic conventions. It represents a guid or
+	// another name describing the event source.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "cf/gorouter"
+	// Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope].
+	// It is used for logs and metrics emitted by CloudFoundry. It is
+	// supposed to contain the component name, e.g. "gorouter", for
+	// CloudFoundry components.
+	//
+	// When system components are instrumented, values from the
+	// [Bosh spec]
+	// should be used. The `system.id` should be set to
+	// `spec.deployment/spec.name`.
+	//
+	// [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+	// [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+	CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id")
+
+	// CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the
+	// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+	// describing the concrete instance of the event source.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: CloudFoundry defines the `instance_id` in the
+	// [Loggregator v2 envelope].
+	// It is used for logs and metrics emitted by CloudFoundry. It is
+	// supposed to contain the VM id for CloudFoundry components.
+	//
+	// When system components are instrumented, values from the
+	// [Bosh spec]
+	// should be used. The `system.instance.id` should be set to `spec.id`.
+	//
+	// [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+	// [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+	CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id")
+)
+
+// CloudFoundryAppID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+// application.
+func CloudFoundryAppID(val string) attribute.KeyValue {
+	return CloudFoundryAppIDKey.String(val)
+}
+
+// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+// of the application instance. 0 when just one instance is active.
+func CloudFoundryAppInstanceID(val string) attribute.KeyValue {
+	return CloudFoundryAppInstanceIDKey.String(val)
+}
+
+// CloudFoundryAppName returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+// application.
+func CloudFoundryAppName(val string) attribute.KeyValue {
+	return CloudFoundryAppNameKey.String(val)
+}
+
+// CloudFoundryOrgID returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+// CloudFoundry org the application is running in.
+func CloudFoundryOrgID(val string) attribute.KeyValue {
+	return CloudFoundryOrgIDKey.String(val)
+}
+
+// CloudFoundryOrgName returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+// CloudFoundry organization the app is running in.
+func CloudFoundryOrgName(val string) attribute.KeyValue {
+	return CloudFoundryOrgNameKey.String(val)
+}
+
+// CloudFoundryProcessID returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.id" semantic conventions. It represents the UID
+// identifying the process.
+func CloudFoundryProcessID(val string) attribute.KeyValue {
+	return CloudFoundryProcessIDKey.String(val)
+}
+
+// CloudFoundryProcessType returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.type" semantic conventions. It represents the type of
+// process.
+func CloudFoundryProcessType(val string) attribute.KeyValue {
+	return CloudFoundryProcessTypeKey.String(val)
+}
+
+// CloudFoundrySpaceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceID(val string) attribute.KeyValue {
+	return CloudFoundrySpaceIDKey.String(val)
+}
+
+// CloudFoundrySpaceName returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceName(val string) attribute.KeyValue {
+	return CloudFoundrySpaceNameKey.String(val)
+}
+
+// CloudFoundrySystemID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.id" semantic conventions. It represents a guid or another
+// name describing the event source.
+func CloudFoundrySystemID(val string) attribute.KeyValue {
+	return CloudFoundrySystemIDKey.String(val)
+}
+
+// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+// describing the concrete instance of the event source.
+func CloudFoundrySystemInstanceID(val string) attribute.KeyValue {
+	return CloudFoundrySystemInstanceIDKey.String(val)
+}
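+
+// Illustrative usage only (a sketch, not part of the upstream conventions):
+// assuming this package is imported as semconv, CloudFoundry resource
+// attributes are usually read from VCAP_APPLICATION and attached to a resource
+// (resource.NewWithAttributes is from go.opentelemetry.io/otel/sdk/resource):
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.CloudFoundryAppName("my-app-name"),
+//		semconv.CloudFoundryAppInstanceID("0"),
+//		semconv.CloudFoundrySpaceName("my-space-name"),
+//	)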
+
+// Namespace: code
+const (
+	// CodeColumnNumberKey is the attribute Key conforming to the
+	// "code.column.number" semantic conventions. It represents the column number in
+	// `code.file.path` best representing the operation. It SHOULD point within the
+	// code unit named in `code.function.name`. This attribute MUST NOT be used on
+	// the Profile signal since the data is already captured in 'message Line'. This
+	// constraint is imposed to prevent redundancy and maintain data integrity.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	CodeColumnNumberKey = attribute.Key("code.column.number")
+
+	// CodeFilePathKey is the attribute Key conforming to the "code.file.path"
+	// semantic conventions. It represents the source code file name that identifies
+	// the code unit as uniquely as possible (preferably an absolute file path).
+	// This attribute MUST NOT be used on the Profile signal since the data is
+	// already captured in 'message Function'. This constraint is imposed to prevent
+	// redundancy and maintain data integrity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: /usr/local/MyApplication/content_root/app/index.php
+	CodeFilePathKey = attribute.Key("code.file.path")
+
+	// CodeFunctionNameKey is the attribute Key conforming to the
+	// "code.function.name" semantic conventions. It represents the method or
+	// function fully-qualified name without arguments. The value should fit the
+	// natural representation of the language runtime, which is also likely the same
+	// used within the `code.stacktrace` attribute value. This attribute MUST NOT be
+	// used on the Profile signal since the data is already captured in 'message
+	// Function'. This constraint is imposed to prevent redundancy and maintain data
+	// integrity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "com.example.MyHttpService.serveRequest",
+	// "GuzzleHttp\Client::transfer", "fopen"
+	// Note: Values and format depend on each language runtime, thus it is
+	// impossible to provide an exhaustive list of examples.
+	// The values are usually the same (or prefixes of) the ones found in native
+	// stack trace representation stored in
+	// `code.stacktrace` without information on arguments.
+	//
+	// Examples:
+	//
+	//   - Java method: `com.example.MyHttpService.serveRequest`
+	//   - Java anonymous class method: `com.mycompany.Main$1.myMethod`
+	//   - Java lambda method:
+	//     `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod`
+	//   - PHP function: `GuzzleHttp\Client::transfer`
+	//   - Go function: `github.com/my/repo/pkg.foo.func5`
+	//   - Elixir: `OpenTelemetry.Ctx.new`
+	//   - Erlang: `opentelemetry_ctx:new`
+	//   - Rust: `playground::my_module::my_cool_func`
+	//   - C function: `fopen`
+	CodeFunctionNameKey = attribute.Key("code.function.name")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.line.number"
+	// semantic conventions. It represents the line number in `code.file.path` best
+	// representing the operation. It SHOULD point within the code unit named in
+	// `code.function.name`. This attribute MUST NOT be used on the Profile signal
+	// since the data is already captured in 'message Line'. This constraint is
+	// imposed to prevent redundancy and maintain data integrity.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	CodeLineNumberKey = attribute.Key("code.line.number")
+
+	// CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace"
+	// semantic conventions. It represents a stacktrace as a string in the natural
+	// representation for the language runtime. The representation is identical to
+	// [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile
+	// signal since the data is already captured in 'message Location'. This
+	// constraint is imposed to prevent redundancy and maintain data integrity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+	// com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+	// com.example.GenerateTrace.main(GenerateTrace.java:5)
+	//
+	// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+	CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumnNumber returns an attribute KeyValue conforming to the
+// "code.column.number" semantic conventions. It represents the column number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeColumnNumber(val int) attribute.KeyValue {
+	return CodeColumnNumberKey.Int(val)
+}
+
+// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path"
+// semantic conventions. It represents the source code file name that identifies
+// the code unit as uniquely as possible (preferably an absolute file path). This
+// attribute MUST NOT be used on the Profile signal since the data is already
+// captured in 'message Function'. This constraint is imposed to prevent
+// redundancy and maintain data integrity.
+func CodeFilePath(val string) attribute.KeyValue {
+	return CodeFilePathKey.String(val)
+}
+
+// CodeFunctionName returns an attribute KeyValue conforming to the
+// "code.function.name" semantic conventions. It represents the method or
+// function fully-qualified name without arguments. The value should fit the
+// natural representation of the language runtime, which is also likely the same
+// used within the `code.stacktrace` attribute value. This attribute MUST NOT be
+// used on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+	return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
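+
+// Illustrative usage only (a sketch, not part of the upstream conventions):
+// assuming this package is imported as semconv, the code location of an
+// operation can be recorded on a span as:
+//
+//	span.SetAttributes(
+//		semconv.CodeFilePath("/usr/local/MyApplication/content_root/app/index.php"),
+//		semconv.CodeFunctionName("com.example.MyHttpService.serveRequest"),
+//		semconv.CodeLineNumber(42),
+//	)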
+
+// Namespace: container
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used to
+	// run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "otelcontribcol"
+	// Note: If using embedded credentials or sensitive data, it is recommended to
+	// remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "otelcontribcol", "--config", "config.yaml"
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full command
+	// run by the container as a single string representing the full command.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "otelcontribcol --config config.yaml"
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCSIPluginNameKey is the attribute Key conforming to the
+	// "container.csi.plugin.name" semantic conventions. It represents the name of
+	// the CSI ([Container Storage Interface]) plugin used by the volume.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pd.csi.storage.gke.io"
+	// Note: This can sometimes be referred to as a "driver" in CSI implementations.
+	// This should represent the `name` field of the GetPluginInfo RPC.
+	//
+	// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+	ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+	// ContainerCSIVolumeIDKey is the attribute Key conforming to the
+	// "container.csi.volume.id" semantic conventions. It represents the unique
+	// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+	// Note: This can sometimes be referred to as a "volume handle" in CSI
+	// implementations. This should represent the `Volume.volume_id` field in CSI
+	// spec.
+	//
+	// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+	ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+	// conventions. It represents the container ID. Usually a UUID, as for example
+	// used to [identify Docker containers]. The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "a3bf90e006b2"
+	//
+	// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime-specific
+	// image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect [API]
+	// endpoint.
+	// K8s defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	//
+	// [API]: https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Container/operation/ContainerInspect
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of the
+	// image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "gcr.io/opentelemetry/operator"
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the repo
+	// digests of the container image as provided by the container runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+	// "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+	// Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+	//
+	// [Docker]: https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Image/operation/ImageInspect
+	// [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image Inspect]. Should be only
+	// the `<tag>` section of the full name for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "v1.27.1", "3.5.7-0"
+	//
+	// [Docker Image Inspect]: https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Image/operation/ImageInspect
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "opentelemetry-autoconf"
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeDescriptionKey is the attribute Key conforming to the
+	// "container.runtime.description" semantic conventions. It represents a
+	// description about the runtime which could include, for example details about
+	// the CRI/API version being used or other customisations.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "docker://19.3.1 - CRI: 1.22.0"
+	ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description")
+
+	// ContainerRuntimeNameKey is the attribute Key conforming to the
+	// "container.runtime.name" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "docker", "containerd", "rkt"
+	ContainerRuntimeNameKey = attribute.Key("container.runtime.name")
+
+	// ContainerRuntimeVersionKey is the attribute Key conforming to the
+	// "container.runtime.version" semantic conventions. It represents the version
+	// of the runtime of this process, as returned by the runtime without
+	// modification.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0.0
+	ContainerRuntimeVersionKey = attribute.Key("container.runtime.version")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string representing the full command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+	return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+	return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime-specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions. It represents the container labels,
+// `<key>` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("container.label."+key, val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntimeDescription returns an attribute KeyValue conforming to the
+// "container.runtime.description" semantic conventions. It represents a
+// description about the runtime which could include, for example details about
+// the CRI/API version being used or other customisations.
+func ContainerRuntimeDescription(val string) attribute.KeyValue {
+	return ContainerRuntimeDescriptionKey.String(val)
+}
+
+// ContainerRuntimeName returns an attribute KeyValue conforming to the
+// "container.runtime.name" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntimeName(val string) attribute.KeyValue {
+	return ContainerRuntimeNameKey.String(val)
+}
+
+// ContainerRuntimeVersion returns an attribute KeyValue conforming to the
+// "container.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ContainerRuntimeVersion(val string) attribute.KeyValue {
+	return ContainerRuntimeVersionKey.String(val)
+}
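+
+// Illustrative usage only (a sketch, not part of the upstream conventions):
+// assuming this package is imported as semconv, container attributes are
+// typically attached to a resource (resource.NewWithAttributes is from
+// go.opentelemetry.io/otel/sdk/resource):
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ContainerID("a3bf90e006b2"),
+//		semconv.ContainerName("opentelemetry-autoconf"),
+//		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
+//	)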
+ +// ContainerLabel returns an attribute KeyValue conforming to the +// "container.label" semantic conventions. It represents the container labels, +// `<key>` being the label name, the value being the label value. +func ContainerLabel(key string, val string) attribute.KeyValue { + return attribute.String("container.label."+key, val) +} + +// ContainerName returns an attribute KeyValue conforming to the "container.name" +// semantic conventions. It represents the container name used by container +// runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntimeDescription returns an attribute KeyValue conforming to the +// "container.runtime.description" semantic conventions. It represents a +// description about the runtime which could include, for example, details about +// the CRI/API version being used or other customisations. +func ContainerRuntimeDescription(val string) attribute.KeyValue { + return ContainerRuntimeDescriptionKey.String(val) +} + +// ContainerRuntimeName returns an attribute KeyValue conforming to the +// "container.runtime.name" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntimeName(val string) attribute.KeyValue { + return ContainerRuntimeNameKey.String(val) +} + +// ContainerRuntimeVersion returns an attribute KeyValue conforming to the +// "container.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ContainerRuntimeVersion(val string) attribute.KeyValue { + return ContainerRuntimeVersionKey.String(val) +} + +// Namespace: cpu +const ( + // CPULogicalNumberKey is the attribute Key conforming to the + // "cpu.logical_number" semantic conventions. It represents the logical CPU + // number [0..n-1]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. +func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // User + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // System + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // Nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // Idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // IO Wait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // Interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // Steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // Kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application.
In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. 
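// Usage sketch (illustrative): the pool-name fallback described above is
// mechanical enough to show in code. The helper name and all values are
// hypothetical; only the "server.address:server.port/db.namespace" format
// comes from the convention text above.

package main

import "fmt"

// dbPoolName returns the pool's own name when it provides one, and otherwise
// combines server.address, server.port and db.namespace per the note above.
func dbPoolName(explicit, addr string, port int, namespace string) string {
	if explicit != "" {
		return explicit
	}
	return fmt.Sprintf("%s:%d/%s", addr, port, namespace)
}

func main() {
	fmt.Println(dbPoolName("myDataSource", "", 0, ""))               // myDataSource
	fmt.Println(dbPoolName("", "db.example.com", 5432, "customers")) // db.example.com:5432/customers
}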
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. + // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. + // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. 
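// Usage sketch (illustrative): the batch-naming rules above in code form. The
// semconv import version and the helper name are assumptions; the "BATCH "
// prefix and the two-or-more rule come from the notes above.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version
)

// batchOperationAttrs assumes ops holds the operation names of a batch (two
// or more entries): db.operation.name becomes "BATCH <name>" when every
// operation shares one name, otherwise plain "BATCH".
func batchOperationAttrs(ops []string) []attribute.KeyValue {
	name := "BATCH"
	allSame := true
	for _, op := range ops[1:] {
		if op != ops[0] {
			allSame = false
			break
		}
	}
	if allSame {
		name = "BATCH " + ops[0]
	}
	return []attribute.KeyValue{
		semconv.DBOperationName(name),
		semconv.DBOperationBatchSize(len(ops)), // never 1, per the note above
	}
}

func main() {
	fmt.Println(batchOperationAttrs([]string{"INSERT", "INSERT"})) // BATCH INSERT
	fmt.Println(batchOperationAttrs([]string{"INSERT", "UPDATE"})) // BATCH
}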
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. + DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. 
+func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBOperationParameter returns an attribute KeyValue conforming to the +// "db.operation.parameter" semantic conventions. It represents a database +// operation parameter, with `<key>` being the parameter name, and the attribute +// value being a string representation of the parameter value. +func DBOperationParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.operation.parameter."+key, val) +} + +// DBQueryParameter returns an attribute KeyValue conforming to the +// "db.query.parameter" semantic conventions. It represents a database query +// parameter, with `<key>` being the parameter name, and the attribute value +// being a string representation of the parameter value. +func DBQueryParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.query.parameter."+key, val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. +func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only.
+ // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // + // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = 
DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // [SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). 
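// Usage sketch (illustrative): a database client span annotated with the db
// attributes and one of the db.system.name enum values defined above. The
// semconv import version and all attribute values are assumptions.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := otel.Tracer("db-example")
	_, span := tracer.Start(context.Background(), "SELECT public.users",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemNamePostgreSQL, // enum value, no constructor needed
			semconv.DBNamespace("customers"),
			semconv.DBCollectionName("public.users"),
			semconv.DBOperationName("SELECT"),
			semconv.DBQueryText("SELECT * FROM public.users WHERE id = ?"),
		),
	)
	defer span.End()
	// After executing the query, the row count can be recorded as well:
	span.SetAttributes(semconv.DBResponseReturnedRows(42))
}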
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. +func DeploymentName(val string) attribute.KeyValue { + return DeploymentNameKey.String(val) +} + +// Enum values for deployment.status +var ( + // failed + // Stability: development + DeploymentStatusFailed = DeploymentStatusKey.String("failed") + // succeeded + // Stability: development + DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") +) + +// Namespace: destination +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the destination + // address - domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. 
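// Usage sketch (illustrative): per the deployment note above, two resources
// that differ only in deployment.environment.name identify the same service.
// ServiceName is defined elsewhere in this package; the semconv import
// version and all values are assumptions.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version
)

func main() {
	for _, env := range []string{"staging", "production"} {
		res := resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ServiceName("frontend"), // identity-relevant
			semconv.DeploymentEnvironmentName(env), // not identity-relevant
			semconv.DeploymentID("1208"),
		)
		fmt.Println(res.Attributes())
	}
}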
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the source side, and when communicating through an + // intermediary, `destination.address` SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the "destination.port" + // semantic conventions. It represents the destination port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Namespace: device +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123456789012345", "01:23:45:67:89:AB" + // Note: Its value SHOULD be identical for all apps on a device and it SHOULD + // NOT change if an app is uninstalled and re-installed. + // However, it might be resettable by the user for all apps on a device. + // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be + // used as values. + // + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. + // + // > [!WARNING] + // > + // > This attribute may contain sensitive (PII) information. Caution + // > should be taken when storing personal data or anything which can identify a + // > user. GDPR and data protection laws may apply, + // > ensure you do your own due diligence. + // > + // > Due to these reasons, this + // > identifier is not recommended for consumer applications and will likely + // > result in rejection from both Google Play and App Store. + // > However, it may be appropriate for specific enterprise scenarios, such as + // > kiosk devices or enterprise-managed devices, with appropriate compliance + // > clearance. + // > Any instrumentation providing this identifier MUST implement it as an + // > opt-in feature. + // > + // > See [`app.installation.id`] for a more + // > privacy-preserving alternative. + // + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids + // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of the + // device manufacturer.
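// Usage sketch (illustrative): device.id carries PII and MUST be opt-in per
// the warning above, so a helper can gate it explicitly. The helper name,
// semconv import version, and all values are assumptions.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version
)

// deviceAttrs always emits the model attributes and adds device.id only when
// the integrator has explicitly opted in.
func deviceAttrs(optInDeviceID bool, deviceID string) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.DeviceManufacturer("Samsung"),
		semconv.DeviceModelIdentifier("SM-G920F"),
		semconv.DeviceModelName("Samsung Galaxy S6"),
	}
	if optInDeviceID {
		attrs = append(attrs, semconv.DeviceID(deviceID)) // PII-sensitive
	}
	return attrs
}

func main() {
	fmt.Println(deviceAttrs(false, "")) // device.id omitted by default
}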
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apple", "Samsung" + // Note: The Android OS provides this field via [Build]. iOS apps SHOULD + // hardcode the value `Apple`. + // + // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone3,4", "SM-G920F" + // Note: It's recommended this value represents a machine-readable version of + // the model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" + // semantic conventions. It represents the marketing name for the device model. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic +// conventions. It represents a unique identifier representing the device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name for +// the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// Namespace: disk +const ( + // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" + // semantic conventions. It represents the disk IO operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "read" + DiskIODirectionKey = attribute.Key("disk.io.direction") +) + +// Enum values for disk.io.direction +var ( + // read + // Stability: development + DiskIODirectionRead = DiskIODirectionKey.String("read") + // write + // Stability: development + DiskIODirectionWrite = DiskIODirectionKey.String("write") +) + +// Namespace: dns +const ( + // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic + // conventions. It represents the list of IPv4 or IPv6 addresses resolved during + // DNS lookup. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + DNSAnswersKey = attribute.Key("dns.answers") + + // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" + // semantic conventions. It represents the name being queried. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.example.com", "opentelemetry.io" + // Note: The name represents the queried domain name as it appears in the DNS + // query without any additional normalization. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers" +// semantic conventions. It represents the list of IPv4 or IPv6 addresses +// resolved during DNS lookup. +func DNSAnswers(val ...string) attribute.KeyValue { + return DNSAnswersKey.StringSlice(val) +} + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Namespace: elasticsearch +const ( + // ElasticsearchNodeNameKey is the attribute Key conforming to the + // "elasticsearch.node.name" semantic conventions. It represents the + // human-readable identifier of the node/instance to which a request was + // routed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-0000000001" + ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") +) + +// ElasticsearchNodeName returns an attribute KeyValue conforming to the +// "elasticsearch.node.name" semantic conventions. It represents the +// human-readable identifier of the node/instance to which a request was +// routed. +func ElasticsearchNodeName(val string) attribute.KeyValue { + return ElasticsearchNodeNameKey.String(val) +} + +// Namespace: enduser +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic + // conventions. It represents the unique identifier of an end user in the + // system. It may be a username, email address, or other identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "username" + // Note: Unique identifier of an end user in the system. + // + // > [!Warning] + // > This field contains sensitive (PII) information. + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id" + // semantic conventions. It represents the pseudonymous identifier of an end + // user. This identifier should be a random value that is not directly linked or + // associated with the end user's actual identity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "QdH5CAWJgqVT4rOr0qtumf" + // Note: Pseudonymous identifier of an end user. + // + // > [!Warning] + // > This field contains sensitive (linkable PII) information. + EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id") +)
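// Usage sketch (illustrative): given the PII warnings above, the pseudonymous
// identifier is usually the safer attribute to record. The semconv import
// version and the value are assumptions.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version
)

func main() {
	tracer := otel.Tracer("auth-example")
	_, span := tracer.Start(context.Background(), "login")
	defer span.End()
	// enduser.pseudo.id must not be derivable from the real identity.
	span.SetAttributes(semconv.EnduserPseudoID("QdH5CAWJgqVT4rOr0qtumf"))
}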
+ +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the unique identifier of an end user in +// the system. It may be a username, email address, or other identifier. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserPseudoID returns an attribute KeyValue conforming to the +// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous +// identifier of an end user. This identifier should be a random value that is +// not directly linked or associated with the end user's actual identity. +func EnduserPseudoID(val string) attribute.KeyValue { + return EnduserPseudoIDKey.String(val) +} + +// Namespace: error +const ( + // ErrorMessageKey is the attribute Key conforming to the "error.message" + // semantic conventions. It represents a message providing more detail about an + // error in human-readable form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Unexpected input type: string", "The user has exceeded their + // storage quota" + // Note: `error.message` should provide additional context and detail about an + // error. + // It is NOT RECOMMENDED to duplicate the value of `error.type` in + // `error.message`. + // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in + // `error.message`. + // + // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded + // cardinality and overlap with span status. + ErrorMessageKey = attribute.Key("error.message") + + // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic + // conventions. It describes a class of error the operation ended + // with. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "timeout", "java.net.UnknownHostException", + // "server_certificate_invalid", "500" + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library SHOULD be + // low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query time + // when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT set + // `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as HTTP + // or gRPC status codes), + // it's RECOMMENDED to: + // + // - Use a domain-specific attribute + // - Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +// ErrorMessage returns an attribute KeyValue conforming to the "error.message" +// semantic conventions. It represents a message providing more detail about an +// error in human-readable form. +func ErrorMessage(val string) attribute.KeyValue { + return ErrorMessageKey.String(val) +} + +// Enum values for error.type +var ( + // A fallback error value to be used when the instrumentation doesn't define a + // custom value. + // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? * + // + // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. 
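// Usage sketch (illustrative): combining the error and exception attributes
// above on a failed span. The semconv import version is an assumption; the
// error itself is made up.

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version
)

func main() {
	tracer := otel.Tracer("error-example")
	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()

	if err := errors.New("connection timed out"); err != nil {
		// Keep error.type low-cardinality; fall back to _OTHER when no
		// better class applies, per the guidance above.
		span.SetAttributes(semconv.ErrorTypeOther)
		// RecordError emits an exception event carrying exception.type and
		// exception.message populated from err.
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
	}
}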
It represents the name of + // the source on which the triggering operation was performed. For example, in + // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the + // database name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myBucketName", "myDbName" + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or S3 is + // the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myFile.txt", "myTableName" + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It describes the type of + // the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string containing + // the time when the data was accessed in the [ISO 8601] format expressed in + // [UTC]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a string, + // that will be potentially reused for other invocations to the same + // function/function version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" + // Note: - **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation ID of + // the current function invocation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" + // semantic conventions. It represents the name of the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: my-function + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function.
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud region of + // the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: eu-central-1 + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" + // semantic conventions. It represents the amount of memory available to the + // serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information (which must be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this runtime + // instance executes. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-function", "myazurefunctionapp/some-function-name" + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function.name`] + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + // + // - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + // + // + // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation time + // in the [ISO 8601] format expressed in [UTC]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation.
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/run/docs/container-contract#services-env-vars + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. 
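// Usage sketch (illustrative): a serverless handler recording the faas
// attributes above, including the megabytes-to-bytes conversion for
// faas.max_memory noted earlier. The semconv import version is an assumption.

package main

import (
	"context"
	"os"
	"strconv"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version
)

func handle(ctx context.Context, coldStart bool) {
	tracer := otel.Tracer("faas-example")
	_, span := tracer.Start(ctx, "my-function")
	defer span.End()

	span.SetAttributes(
		semconv.FaaSColdstart(coldStart),
		semconv.FaaSTriggerHTTP, // enum value defined below
	)
	// AWS_LAMBDA_FUNCTION_MEMORY_SIZE is in MiB; faas.max_memory wants bytes,
	// hence the multiplication by 1,048,576 from the note above.
	if mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
		span.SetAttributes(semconv.FaaSMaxMemory(mb * 1_048_576))
	}
}

func main() { handle(context.Background(), true) }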
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+// the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region of
+// the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic
+// conventions. It represents the name of the single function that this runtime
+// instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic
+// conventions. It represents a string containing the function invocation time in
+// the [ISO 8601] format expressed in [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the "faas.version"
+// semantic conventions. It represents the immutable version of the function
+// being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Enum values for faas.document.operation
+var (
+ // When a new object is created.
+ // Stability: development
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified.
+ // Stability: development
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted.
+ // Stability: development
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ // Stability: development
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+ // A response to some data source operation such as a database or filesystem
+ // read/write
+ // Stability: development
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ // Stability: development
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ // Stability: development
+ FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ // Stability: development
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ // Stability: development
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Namespace: feature_flag
+const (
+ // FeatureFlagContextIDKey is the attribute Key conforming to the
+ // "feature_flag.context.id" semantic conventions. It represents the unique
+ // identifier for the flag evaluation context. For example, the targeting key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+ FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+ // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+ // semantic conventions. It represents the lookup key of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "logo-color"
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider.name" semantic conventions. It identifies the
+ // feature flag provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "Flag Manager"
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+ // FeatureFlagResultReasonKey is the attribute Key conforming to the
+ // "feature_flag.result.reason" semantic conventions. It represents the reason
+ // code which shows how a feature flag value was determined.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "static", "targeting_match", "error", "default"
+ FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+ // FeatureFlagResultValueKey is the attribute Key conforming to the
+ // "feature_flag.result.value" semantic conventions. It represents the evaluated
+ // value of the feature flag.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "#ff0000", true, 3
+ // Note: With some feature flag providers, feature flag results can be quite
+ // large or contain private or sensitive details.
+ // Because of this, `feature_flag.result.variant` is often the preferred
+ // attribute if it is available.
+ //
+ // It may be desirable to redact or otherwise limit the size and scope of
+ // `feature_flag.result.value` if possible.
+ // Because the evaluated flag value is unstructured and may be any type, it is
+ // left to the instrumentation author to determine how best to achieve this.
+ FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+ // FeatureFlagResultVariantKey is the attribute Key conforming to the
+ // "feature_flag.result.variant" semantic conventions. It represents a semantic
+ // identifier for an evaluated flag value.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "red", "true", "on"
+ // Note: A semantic identifier, commonly referred to as a variant, provides a
+ // means for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+ // FeatureFlagSetIDKey is the attribute Key conforming to the
+ // "feature_flag.set.id" semantic conventions. It represents the identifier of
+ // the [flag set] to which the feature flag belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "proj-1", "ab98sgs", "service1/dev"
+ //
+ // [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+ FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+ // FeatureFlagVersionKey is the attribute Key conforming to the
+ // "feature_flag.version" semantic conventions. It represents the version of the
+ // ruleset used during the evaluation. This may be any stable value which
+ // uniquely identifies the ruleset.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "1", "01ABCDEF"
+ FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+ return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It identifies the
+// feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
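+//
+// For example (an illustrative sketch; the values mirror the examples above,
+// and the resulting attrs could be attached to a flag-evaluation span or
+// event):
+//
+//	attrs := []attribute.KeyValue{
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagResultVariant("red"),
+//		FeatureFlagProviderName("Flag Manager"),
+//	}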
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+ return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+ return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+ return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+ // The resolved value is static (no dynamic evaluation).
+ // Stability: release_candidate
+ FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+ // The resolved value fell back to a pre-configured value (no dynamic evaluation
+ // occurred or dynamic evaluation yielded no result).
+ // Stability: release_candidate
+ FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+ // The resolved value was the result of a dynamic evaluation, such as a rule or
+ // specific user-targeting.
+ // Stability: release_candidate
+ FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+ // The resolved value was the result of pseudorandom assignment.
+ // Stability: release_candidate
+ FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+ // The resolved value was retrieved from cache.
+ // Stability: release_candidate
+ FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+ // The resolved value was the result of the flag being disabled in the
+ // management system.
+ // Stability: release_candidate
+ FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+ // The reason for the resolved value could not be determined.
+ // Stability: release_candidate
+ FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+ // The resolved value is non-authoritative or possibly out of date.
+ // Stability: release_candidate
+ FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+ // The resolved value was the result of an error.
+ // Stability: release_candidate
+ FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
+
+// Namespace: file
+const (
+ // FileAccessedKey is the attribute Key conforming to the "file.accessed"
+ // semantic conventions. It represents the time when the file was last accessed,
+ // in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileAccessedKey = attribute.Key("file.accessed")
+
+ // FileAttributesKey is the attribute Key conforming to the "file.attributes"
+ // semantic conventions. It represents the array of file attributes.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "readonly", "hidden"
+ // Note: Attribute names depend on the OS or file system. Here’s a
+ // non-exhaustive list of values expected for this attribute: `archive`,
+ // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+ // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+ // `write`.
+ FileAttributesKey = attribute.Key("file.attributes")
+
+ // FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+ // conventions. It represents the time when the file attributes or metadata was
+ // last changed, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: `file.changed` captures the time when any of the file's properties or
+ // attributes (including the content) are changed, while `file.modified`
+ // captures the timestamp when the file content is modified.
+ FileChangedKey = attribute.Key("file.changed")
+
+ // FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+ // conventions. It represents the time when the file was created, in ISO 8601
+ // format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileCreatedKey = attribute.Key("file.created")
+
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is located.
+ // It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/user", "C:\Program Files\MyApp"
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the leading
+ // dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+ // semantic conventions. It represents the name of the fork. A fork is
+ // additional data associated with a filesystem object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Zone.Identifier"
+ // Note: On Linux, a resource fork is used to store additional data with a
+ // filesystem object. A file always has at least one fork for the data portion,
+ // and additional forks may exist.
+ // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+ // data stream for a file is just called $DATA. Zone.Identifier is commonly used
+ // by Windows to track contents downloaded from the Internet. An ADS is
+ // typically of the form: C:\path\to\filename.extension:some_fork_name, and
+ // some_fork_name is the value that should populate `fork_name`.
+ // `filename.extension` should populate `file.name`, and `extension` should
+ // populate `file.extension`. The full path, `file.path`, will include the fork
+ // name.
+ FileForkNameKey = attribute.Key("file.fork_name")
+
+ // FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+ // semantic conventions. It represents the primary Group ID (GID) of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileGroupIDKey = attribute.Key("file.group.id")
+
+ // FileGroupNameKey is the attribute Key conforming to the "file.group.name"
+ // semantic conventions. It represents the primary group name of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "users"
+ FileGroupNameKey = attribute.Key("file.group.name")
+
+ // FileInodeKey is the attribute Key conforming to the "file.inode" semantic
+ // conventions. It represents the inode representing the file in the filesystem.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "256383"
+ FileInodeKey = attribute.Key("file.inode")
+
+ // FileModeKey is the attribute Key conforming to the "file.mode" semantic
+ // conventions. It represents the mode of the file in octal representation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0640"
+ FileModeKey = attribute.Key("file.mode")
+
+ // FileModifiedKey is the attribute Key conforming to the "file.modified"
+ // semantic conventions. It represents the time when the file content was last
+ // modified, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ FileModifiedKey = attribute.Key("file.modified")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.png"
+ FileNameKey = attribute.Key("file.name")
+
+ // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id"
+ // semantic conventions. It represents the user ID (UID) or security identifier
+ // (SID) of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileOwnerIDKey = attribute.Key("file.owner.id")
+
+ // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name"
+ // semantic conventions. It represents the username of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ FileOwnerNameKey = attribute.Key("file.owner.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe"
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FileSizeKey = attribute.Key("file.size")
+
+ // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the
+ // "file.symbolic_link.target_path" semantic conventions. It represents the path
+ // to the target of a symbolic link.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/python3"
+ // Note: This attribute is only applicable to symbolic links.
+ FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path")
+)
+
+// FileAccessed returns an attribute KeyValue conforming to the "file.accessed"
+// semantic conventions. It represents the time when the file was last accessed,
+// in ISO 8601 format.
+func FileAccessed(val string) attribute.KeyValue {
+ return FileAccessedKey.String(val)
+}
+
+// FileAttributes returns an attribute KeyValue conforming to the
+// "file.attributes" semantic conventions. It represents the array of file
+// attributes.
+func FileAttributes(val ...string) attribute.KeyValue {
+ return FileAttributesKey.StringSlice(val)
+}
+
+// FileChanged returns an attribute KeyValue conforming to the "file.changed"
+// semantic conventions. It represents the time when the file attributes or
+// metadata was last changed, in ISO 8601 format.
+func FileChanged(val string) attribute.KeyValue {
+ return FileChangedKey.String(val)
+}
+
+// FileCreated returns an attribute KeyValue conforming to the "file.created"
+// semantic conventions. It represents the time when the file was created, in ISO
+// 8601 format.
+func FileCreated(val string) attribute.KeyValue {
+ return FileCreatedKey.String(val)
+}
+
+// FileDirectory returns an attribute KeyValue conforming to the "file.directory"
+// semantic conventions. It represents the directory where the file is located.
+// It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the "file.extension"
+// semantic conventions. It represents the file extension, excluding the leading
+// dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileForkName returns an attribute KeyValue conforming to the "file.fork_name"
+// semantic conventions. It represents the name of the fork. A fork is additional
+// data associated with a filesystem object.
+func FileForkName(val string) attribute.KeyValue {
+ return FileForkNameKey.String(val)
+}
+
+// FileGroupID returns an attribute KeyValue conforming to the "file.group.id"
+// semantic conventions. It represents the primary Group ID (GID) of the file.
+func FileGroupID(val string) attribute.KeyValue {
+ return FileGroupIDKey.String(val)
+}
+
+// FileGroupName returns an attribute KeyValue conforming to the
+// "file.group.name" semantic conventions. It represents the primary group name
+// of the file.
+func FileGroupName(val string) attribute.KeyValue {
+ return FileGroupNameKey.String(val)
+}
+
+// FileInode returns an attribute KeyValue conforming to the "file.inode"
+// semantic conventions. It represents the inode representing the file in the
+// filesystem.
+func FileInode(val string) attribute.KeyValue {
+ return FileInodeKey.String(val)
+}
+
+// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic
+// conventions. It represents the mode of the file in octal representation.
+func FileMode(val string) attribute.KeyValue {
+ return FileModeKey.String(val)
+}
+
+// FileModified returns an attribute KeyValue conforming to the "file.modified"
+// semantic conventions. It represents the time when the file content was last
+// modified, in ISO 8601 format.
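+//
+// For example (an illustrative sketch combining several file.* helpers; the
+// timestamp follows the ISO 8601 examples above):
+//
+//	span.SetAttributes(
+//		FilePath("/home/alice/example.png"),
+//		FileName("example.png"),
+//		FileModified("2021-01-01T12:00:00Z"),
+//	)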
+func FileModified(val string) attribute.KeyValue {
+ return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+ return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+ return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+ return FileSymbolicLinkTargetPathKey.String(val)
+}
+
+// Namespace: gcp
+const (
+ // GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+ // "gcp.apphub.application.container" semantic conventions. It represents the
+ // container within GCP where the AppHub application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+ // GCPAppHubApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub.application.id" semantic conventions. It represents the name of
+ // the application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+ // GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+ // "gcp.apphub.application.location" semantic conventions. It represents the GCP
+ // zone or region where the application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+ // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+ // criticality of a service, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+ // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.environment_type" semantic conventions. It represents the
+ // environment of a service, i.e. the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+ // GCPAppHubServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub.service.id" semantic conventions. It represents the name of the
+ // service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+ // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+ // the criticality of a workload, which indicates its importance to the
+ // business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+ // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.environment_type" semantic conventions. It represents
+ // the environment of a workload, i.e. the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+ // GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+ // workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+ // GCPAppHubDestinationApplicationContainerKey is the attribute Key conforming
+ // to the "gcp.apphub_destination.application.container" semantic conventions.
+ // It represents the container within GCP where the AppHub destination
+ // application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubDestinationApplicationContainerKey = attribute.Key("gcp.apphub_destination.application.container")
+
+ // GCPAppHubDestinationApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub_destination.application.id" semantic conventions. It represents
+ // the name of the destination application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubDestinationApplicationIDKey = attribute.Key("gcp.apphub_destination.application.id")
+
+ // GCPAppHubDestinationApplicationLocationKey is the attribute Key conforming to
+ // the "gcp.apphub_destination.application.location" semantic conventions. It
+ // represents the GCP zone or region where the destination application is
+ // defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubDestinationApplicationLocationKey = attribute.Key("gcp.apphub_destination.application.location")
+
+ // GCPAppHubDestinationServiceCriticalityTypeKey is the attribute Key conforming
+ // to the "gcp.apphub_destination.service.criticality_type" semantic
+ // conventions. It represents the criticality of a destination service, which
+ // indicates its importance to the business as specified in [AppHub type enum].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubDestinationServiceCriticalityTypeKey = attribute.Key("gcp.apphub_destination.service.criticality_type")
+
+ // GCPAppHubDestinationServiceEnvironmentTypeKey is the attribute Key conforming
+ // to the "gcp.apphub_destination.service.environment_type" semantic
+ // conventions. It represents the software lifecycle stage of a destination
+ // service as defined by the [AppHub environment type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubDestinationServiceEnvironmentTypeKey = attribute.Key("gcp.apphub_destination.service.environment_type")
+
+ // GCPAppHubDestinationServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub_destination.service.id" semantic conventions. It represents the
+ // name of the destination service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubDestinationServiceIDKey = attribute.Key("gcp.apphub_destination.service.id")
+
+ // GCPAppHubDestinationWorkloadCriticalityTypeKey is the attribute Key
+ // conforming to the "gcp.apphub_destination.workload.criticality_type" semantic
+ // conventions. It represents the criticality of a destination workload, which
+ // indicates its importance to the business as specified in [AppHub type enum].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubDestinationWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub_destination.workload.criticality_type")
+
+ // GCPAppHubDestinationWorkloadEnvironmentTypeKey is the attribute Key
+ // conforming to the "gcp.apphub_destination.workload.environment_type" semantic
+ // conventions. It represents the environment of a destination workload, i.e.
+ // the stage of a software lifecycle, as provided in the
+ // [AppHub environment type].
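+ //
+ // For example (an illustrative sketch; the enum values are defined further
+ // below in this file and are already attribute.KeyValue instances):
+ //
+ //	span.SetAttributes(GCPAppHubDestinationWorkloadEnvironmentTypeProduction)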
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubDestinationWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub_destination.workload.environment_type")
+
+ // GCPAppHubDestinationWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub_destination.workload.id" semantic conventions. It represents the
+ // name of the destination workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubDestinationWorkloadIDKey = attribute.Key("gcp.apphub_destination.workload.id")
+
+ // GCPClientServiceKey is the attribute Key conforming to the
+ // "gcp.client.service" semantic conventions. It identifies the Google Cloud
+ // service for which the official client library is intended.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+ // Note: Intended to be a stable identifier for Google Cloud client libraries
+ // that is uniform across implementation languages. The value should be derived
+ // from the canonical service domain for the service; for example,
+ // 'foo.googleapis.com' should result in a value of 'foo'.
+ GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+ // the Cloud Run [execution] being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`] environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "job-name-xxxx", "sample-job-mdw84"
+ //
+ // [execution]: https://cloud.google.com/run/docs/managing/job-executions
+ // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+ // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1
+ //
+ // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+ // GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+ // of a GCE instance. This is the full value of the default or
+ // [custom hostname].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-host1234.example.com",
+ // "sample-vm.us-west1-b.c.my-project.internal"
+ //
+ // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+ GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGCEInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance name
+ // of a GCE instance. This is the value provided by `host.name`, the visible
+ // name of the instance in the Cloud Console UI, and the prefix for the default
+ // hostname of the instance as defined by the [default internal DNS name].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-1", "my-vm-name"
+ //
+ // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+ GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+ return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPAppHubDestinationApplicationContainer returns an attribute KeyValue
+// conforming to the "gcp.apphub_destination.application.container" semantic
+// conventions. It represents the container within GCP where the AppHub
+// destination application is defined.
+func GCPAppHubDestinationApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubDestinationApplicationContainerKey.String(val)
+}
+
+// GCPAppHubDestinationApplicationID returns an attribute KeyValue conforming to
+// the "gcp.apphub_destination.application.id" semantic conventions. It
+// represents the name of the destination application as configured in AppHub.
+func GCPAppHubDestinationApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubDestinationApplicationIDKey.String(val)
+}
+
+// GCPAppHubDestinationApplicationLocation returns an attribute KeyValue
+// conforming to the "gcp.apphub_destination.application.location" semantic
+// conventions. It represents the GCP zone or region where the destination
+// application is defined.
+func GCPAppHubDestinationApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubDestinationApplicationLocationKey.String(val)
+}
+
+// GCPAppHubDestinationServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub_destination.service.id" semantic conventions. It represents the
+// name of the destination service as configured in AppHub.
+func GCPAppHubDestinationServiceID(val string) attribute.KeyValue {
+ return GCPAppHubDestinationServiceIDKey.String(val)
+}
+
+// GCPAppHubDestinationWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub_destination.workload.id" semantic conventions. It represents the
+// name of the destination workload as configured in AppHub.
+func GCPAppHubDestinationWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubDestinationWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It identifies the Google Cloud
+// service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+ return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or
+// [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+ return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+ return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.service.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub.workload.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.workload.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub_destination.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubDestinationServiceCriticalityTypeMissionCritical = GCPAppHubDestinationServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubDestinationServiceCriticalityTypeHigh = GCPAppHubDestinationServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubDestinationServiceCriticalityTypeMedium = GCPAppHubDestinationServiceCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubDestinationServiceCriticalityTypeLow = GCPAppHubDestinationServiceCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub_destination.service.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubDestinationServiceEnvironmentTypeProduction = GCPAppHubDestinationServiceEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubDestinationServiceEnvironmentTypeStaging = GCPAppHubDestinationServiceEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubDestinationServiceEnvironmentTypeTest = GCPAppHubDestinationServiceEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubDestinationServiceEnvironmentTypeDevelopment = GCPAppHubDestinationServiceEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub_destination.workload.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubDestinationWorkloadCriticalityTypeMissionCritical = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubDestinationWorkloadCriticalityTypeHigh = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubDestinationWorkloadCriticalityTypeMedium = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubDestinationWorkloadCriticalityTypeLow = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub_destination.workload.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubDestinationWorkloadEnvironmentTypeProduction = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubDestinationWorkloadEnvironmentTypeStaging = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubDestinationWorkloadEnvironmentTypeTest = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubDestinationWorkloadEnvironmentTypeDevelopment = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Namespace: gen_ai
+const (
+ // GenAIAgentDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.agent.description" semantic conventions. It represents the free-form
+ // description of the GenAI agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Helps with math problems", "Generates fiction stories"
+ GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description")
+
+ // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id"
+ // semantic conventions. It represents the unique identifier of the GenAI agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIAgentIDKey = attribute.Key("gen_ai.agent.id")
+
+ // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name"
+ // semantic conventions. It represents the human-readable name of the GenAI
+ // agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Math Tutor", "Fiction Writer"
+ GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+ // GenAIConversationIDKey is the attribute Key conforming to the
+ // "gen_ai.conversation.id" semantic conventions. It represents the unique
+ // identifier for a conversation (session, thread), used to store and correlate
+ // messages within this conversation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+ // GenAIDataSourceIDKey is the attribute Key conforming to the
+ // "gen_ai.data_source.id" semantic conventions. It represents the data source
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "H7STPQYOND"
+ // Note: Data sources are used by AI agents and RAG applications to store
+ // grounding data. A data source may be an external database, object store,
+ // document collection, website, or any other storage system used by the GenAI
+ // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+ // used by the GenAI system rather than a name specific to the external storage,
+ // such as a database or object store. Semantic conventions referencing
+ // `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+ // `db.*`, to further identify and describe the data source.
+ GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+ // GenAIEmbeddingsDimensionCountKey is the attribute Key conforming to the
+ // "gen_ai.embeddings.dimension.count" semantic conventions. It represents the
+ // number of dimensions the resulting output embeddings should have.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 512, 1024
+ GenAIEmbeddingsDimensionCountKey = attribute.Key("gen_ai.embeddings.dimension.count")
+
+ // GenAIEvaluationExplanationKey is the attribute Key conforming to the
+ // "gen_ai.evaluation.explanation" semantic conventions. It represents a
+ // free-form explanation for the assigned score provided by the evaluator.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "The response is factually accurate but lacks sufficient detail to
+ // fully address the question."
+ GenAIEvaluationExplanationKey = attribute.Key("gen_ai.evaluation.explanation")
+
+ // GenAIEvaluationNameKey is the attribute Key conforming to the
+ // "gen_ai.evaluation.name" semantic conventions. It represents the name of the
+ // evaluation metric used for the GenAI response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Relevance", "IntentResolution"
+ GenAIEvaluationNameKey = attribute.Key("gen_ai.evaluation.name")
+
+ // GenAIEvaluationScoreLabelKey is the attribute Key conforming to the
+ // "gen_ai.evaluation.score.label" semantic conventions. It represents the
+ // human-readable label for the evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "relevant", "not_relevant", "correct", "incorrect", "pass", "fail"
+ // Note: This attribute provides a human-readable interpretation of the
+ // evaluation score produced by an evaluator. For example, a score value of 1
+ // could mean "relevant" in one evaluation system and "not relevant" in another,
+ // depending on the scoring range and evaluator. The label SHOULD have low
+ // cardinality. Possible values depend on the evaluation metric and evaluator
+ // used; implementations SHOULD document the possible values.
+ GenAIEvaluationScoreLabelKey = attribute.Key("gen_ai.evaluation.score.label")
+
+ // GenAIEvaluationScoreValueKey is the attribute Key conforming to the
+ // "gen_ai.evaluation.score.value" semantic conventions. It represents the
+ // evaluation score returned by the evaluator.
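+ //
+ // For example (an illustrative sketch using the raw keys; helper functions
+ // for these attributes may also be generated elsewhere in this package):
+ //
+ //	attrs := []attribute.KeyValue{
+ //		GenAIEvaluationNameKey.String("Relevance"),
+ //		GenAIEvaluationScoreValueKey.Float64(4.0),
+ //		GenAIEvaluationScoreLabelKey.String("relevant"),
+ //	}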
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 4.0 + GenAIEvaluationScoreValueKey = attribute.Key("gen_ai.evaluation.score.value") + + // GenAIInputMessagesKey is the attribute Key conforming to the + // "gen_ai.input.messages" semantic conventions. It represents the chat history + // provided to the model as an input. + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n + // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n + // "parts": [\n {\n "type": "tool_call",\n "id": + // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n + // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n + // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n + // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" + // Note: Instrumentations MUST follow [Input messages JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Messages MUST be provided in the order they were sent to the model. + // Instrumentations MAY provide a way for users to filter or truncate + // input messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") + + // GenAIOperationNameKey is the attribute Key conforming to the + // "gen_ai.operation.name" semantic conventions. It represents the name of the + // operation being performed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If one of the predefined values applies, but specific system uses a + // different name it's RECOMMENDED to document it in the semantic conventions + // for specific GenAI system and use system-specific name in the + // instrumentation. If a different name is not documented, instrumentation + // libraries SHOULD use applicable predefined value. + GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") + + // GenAIOutputMessagesKey is the attribute Key conforming to the + // "gen_ai.output.messages" semantic conventions. It represents the messages + // returned by the model where each message represents a specific model response + // (choice, candidate). + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n + // "content": "The weather in Paris is currently rainy with a temperature of + // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n" + // Note: Instrumentations MUST follow [Output messages JSON schema] + // + // Each message represents a single output choice/candidate generated by + // the model. Each message corresponds to exactly one generation + // (choice/candidate) and vice versa - one choice cannot be split across + // multiple messages or one message cannot contain parts from multiple choices. 
+ // + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // output messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages") + + // GenAIOutputTypeKey is the attribute Key conforming to the + // "gen_ai.output.type" semantic conventions. It represents the represents the + // content type requested by the client. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute SHOULD be used when the client requests output of a + // specific type. The model may return zero or more outputs of this type. + // This attribute specifies the output modality and not the actual output + // format. For example, if an image is requested, the actual output could be a + // URL pointing to an image file. + // Additional output format details may be recorded in the future in the + // `gen_ai.output.{type}.*` attributes. + GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") + + // GenAIProviderNameKey is the attribute Key conforming to the + // "gen_ai.provider.name" semantic conventions. It represents the Generative AI + // provider as identified by the client or server instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The attribute SHOULD be set based on the instrumentation's best + // knowledge and may differ from the actual model provider. + // + // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms + // are accessible using the OpenAI REST API and corresponding client libraries, + // but may proxy or host models from different providers. + // + // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` + // attributes may help identify the actual system in use. + // + // The `gen_ai.provider.name` attribute acts as a discriminator that + // identifies the GenAI telemetry format flavor specific to that provider + // within GenAI semantic conventions. + // It SHOULD be set consistently with provider-specific attributes and signals. + // For example, GenAI spans, metrics, and events related to AWS Bedrock + // should have the `gen_ai.provider.name` set to `aws.bedrock` and include + // applicable `aws.bedrock.*` attributes and are not expected to include + // `openai.*` attributes. + GenAIProviderNameKey = attribute.Key("gen_ai.provider.name") + + // GenAIRequestChoiceCountKey is the attribute Key conforming to the + // "gen_ai.request.choice.count" semantic conventions. It represents the target + // number of candidate completions to return. 
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 3
+	GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+	// GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+	// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+	// encoding formats requested in an embeddings operation, if specified.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ["base64"], ["float", "binary"]
+	// Note: In some GenAI systems the encoding formats are called embedding types.
+	// Also, some GenAI systems only accept a single format per request.
+	GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+	// GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+	// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+	// frequency penalty setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.1
+	GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+	// GenAIRequestMaxTokensKey is the attribute Key conforming to the
+	// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+	// number of tokens the model generates for a request.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 100
+	GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+	// GenAIRequestModelKey is the attribute Key conforming to the
+	// "gen_ai.request.model" semantic conventions. It represents the name of the
+	// GenAI model a request is being made to.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: gpt-4
+	GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+	// GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+	// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+	// presence penalty setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.1
+	GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+	// GenAIRequestSeedKey is the attribute Key conforming to the
+	// "gen_ai.request.seed" semantic conventions. It represents the seed for the
+	// GenAI request; requests with the same seed value are more likely to return
+	// the same result.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 100
+	GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+	// GenAIRequestStopSequencesKey is the attribute Key conforming to the
+	// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+	// of sequences that the model will use to stop generating further tokens.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "forest", "lived"
+	GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+	// GenAIRequestTemperatureKey is the attribute Key conforming to the
+	// "gen_ai.request.temperature" semantic conventions. It represents the
+	// temperature setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.0
+	GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+	// GenAIRequestTopKKey is the attribute Key conforming to the
+	// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+	// setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0
+	GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+	// GenAIRequestTopPKey is the attribute Key conforming to the
+	// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+	// setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0
+	GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+	// GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+	// "gen_ai.response.finish_reasons" semantic conventions. It represents the
+	// array of reasons the model stopped generating tokens, corresponding to each
+	// generation received.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ["stop"], ["stop", "length"]
+	GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+	// GenAIResponseIDKey is the attribute Key conforming to the
+	// "gen_ai.response.id" semantic conventions. It represents the unique
+	// identifier for the completion.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "chatcmpl-123"
+	GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+	// GenAIResponseModelKey is the attribute Key conforming to the
+	// "gen_ai.response.model" semantic conventions. It represents the name of the
+	// model that generated the response.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "gpt-4-0613"
+	GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+	// GenAISystemInstructionsKey is the attribute Key conforming to the
+	// "gen_ai.system_instructions" semantic conventions. It represents the system
+	// message or instructions provided to the GenAI model separately from the chat
+	// history.
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet
+	// users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type":
+	// "text",\n "content": "You are a language translator."\n },\n {\n "type":
+	// "text",\n "content": "Your mission is to translate text in English to
+	// French."\n }\n]\n"
+	// Note: This attribute SHOULD be used when the corresponding provider or API
+	// allows providing system instructions or messages separately from the
+	// chat history.
+	//
+	// Instructions that are part of the chat history SHOULD be recorded in the
+	// `gen_ai.input.messages` attribute instead.
+	//
+	// Instrumentations MUST follow [System instructions JSON schema].
+	//
+	// When recorded on spans, it MAY be recorded as a JSON string if structured
+	// format is not supported and SHOULD be recorded in structured form otherwise.
+	//
+	// Instrumentations MAY provide a way for users to filter or truncate
+	// system instructions.
+	//
+	// > [!Warning]
+	// > This attribute may contain sensitive information.
+	//
+	// See [Recording content on attributes]
+	// section for more details.
+	//
+	// [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json
+	// [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+	GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions")
+
+	// GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+	// semantic conventions. It represents the type of token being counted.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "input", "output"
+	GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+	// GenAIToolCallArgumentsKey is the attribute Key conforming to the
+	// "gen_ai.tool.call.arguments" semantic conventions. It represents the
+	// parameters passed to the tool call.
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "{\n "location": "San Francisco?",\n "date": "2025-10-01"\n}\n"
+	// Note: > [!WARNING]
+	//
+	// > This attribute may contain sensitive information.
+	//
+	// It's expected to be an object - in case a serialized string is available
+	// to the instrumentation, the instrumentation SHOULD make a best effort to
+	// deserialize it to an object. When recorded on spans, it MAY be recorded as a
+	// JSON string if structured format is not supported and SHOULD be recorded in
+	// structured form otherwise.
+	GenAIToolCallArgumentsKey = attribute.Key("gen_ai.tool.call.arguments")
+
+	// GenAIToolCallIDKey is the attribute Key conforming to the
+	// "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+	// identifier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+	GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+	// GenAIToolCallResultKey is the attribute Key conforming to the
+	// "gen_ai.tool.call.result" semantic conventions. It represents the result
+	// returned by the tool call (if any and if execution was successful).
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "{\n "temperature_range": {\n "high": 75,\n "low": 60\n },\n
+	// "conditions": "sunny"\n}\n"
+	// Note: > [!WARNING]
+	//
+	// > This attribute may contain sensitive information.
+	//
+	// It's expected to be an object - in case a serialized string is available
+	// to the instrumentation, the instrumentation SHOULD make a best effort to
+	// deserialize it to an object. When recorded on spans, it MAY be recorded as a
+	// JSON string if structured format is not supported and SHOULD be recorded in
+	// structured form otherwise.
+	GenAIToolCallResultKey = attribute.Key("gen_ai.tool.call.result")
+
+	// GenAIToolDefinitionsKey is the attribute Key conforming to the
+	// "gen_ai.tool.definitions" semantic conventions. It represents the list of
+	// source system tool definitions available to the GenAI agent or model.
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "[\n {\n "type": "function",\n "name": "get_current_weather",\n
+	// "description": "Get the current weather in a given location",\n "parameters":
+	// {\n "type": "object",\n "properties": {\n "location": {\n "type": "string",\n
+	// "description": "The city and state, e.g. San Francisco, CA"\n },\n "unit":
+	// {\n "type": "string",\n "enum": [\n "celsius",\n "fahrenheit"\n ]\n }\n },\n
+	// "required": [\n "location",\n "unit"\n ]\n }\n }\n]\n"
+	// Note: The value of this attribute matches the source system tool definition
+	// format.
+	//
+	// It's expected to be an array of objects where each object represents a tool
+	// definition. In case a serialized string is available
+	// to the instrumentation, the instrumentation SHOULD make a best effort to
+	// deserialize it to an array. When recorded on spans, it MAY be recorded as a
+	// JSON string if structured format is not supported and SHOULD be recorded in
+	// structured form otherwise.
+	//
+	// Since this attribute could be large, it's NOT RECOMMENDED to populate
+	// it by default. Instrumentations MAY provide a way to enable
+	// populating this attribute.
+	GenAIToolDefinitionsKey = attribute.Key("gen_ai.tool.definitions")
+
+	// GenAIToolDescriptionKey is the attribute Key conforming to the
+	// "gen_ai.tool.description" semantic conventions. It represents the tool
+	// description.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Multiply two numbers"
+	GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+	// GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+	// semantic conventions. It represents the name of the tool utilized by the
+	// agent.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Flights"
+	GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+	// GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+	// semantic conventions. It represents the type of the tool utilized by the
+	// agent.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "function", "extension", "datastore"
+	// Note: Extension: A tool executed on the agent-side to directly call external
+	// APIs, bridging the gap between the agent and real-world systems.
+	// Agent-side operations involve actions that are performed by the agent on the
+	// server or within the agent's controlled environment.
+	// Function: A tool executed on the client-side, where the agent generates
+	// parameters for a predefined function, and the client executes the logic.
+	// Client-side operations are actions taken on the user's end or within the
+	// client application.
+	// Datastore: A tool used by the agent to access and query structured or
+	// unstructured external data for retrieval-augmented tasks or knowledge
+	// updates.
+	GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+	// GenAIUsageInputTokensKey is the attribute Key conforming to the
+	// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+	// tokens used in the GenAI input (prompt).
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 100
+	GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens")
+
+	// GenAIUsageOutputTokensKey is the attribute Key conforming to the
+	// "gen_ai.usage.output_tokens" semantic conventions. It represents the number
+	// of tokens used in the GenAI response (completion).
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIEmbeddingsDimensionCount returns an attribute KeyValue conforming to the +// "gen_ai.embeddings.dimension.count" semantic conventions. It represents the +// number of dimensions the resulting output embeddings should have. +func GenAIEmbeddingsDimensionCount(val int) attribute.KeyValue { + return GenAIEmbeddingsDimensionCountKey.Int(val) +} + +// GenAIEvaluationExplanation returns an attribute KeyValue conforming to the +// "gen_ai.evaluation.explanation" semantic conventions. It represents a +// free-form explanation for the assigned score provided by the evaluator. +func GenAIEvaluationExplanation(val string) attribute.KeyValue { + return GenAIEvaluationExplanationKey.String(val) +} + +// GenAIEvaluationName returns an attribute KeyValue conforming to the +// "gen_ai.evaluation.name" semantic conventions. It represents the name of the +// evaluation metric used for the GenAI response. +func GenAIEvaluationName(val string) attribute.KeyValue { + return GenAIEvaluationNameKey.String(val) +} + +// GenAIEvaluationScoreLabel returns an attribute KeyValue conforming to the +// "gen_ai.evaluation.score.label" semantic conventions. It represents the human +// readable label for evaluation. +func GenAIEvaluationScoreLabel(val string) attribute.KeyValue { + return GenAIEvaluationScoreLabelKey.String(val) +} + +// GenAIEvaluationScoreValue returns an attribute KeyValue conforming to the +// "gen_ai.evaluation.score.value" semantic conventions. It represents the +// evaluation score returned by the evaluator. +func GenAIEvaluationScoreValue(val float64) attribute.KeyValue { + return GenAIEvaluationScoreValueKey.Float64(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. 
It represents the target
+// number of candidate completions to return.
+func GenAIRequestChoiceCount(val int) attribute.KeyValue {
+	return GenAIRequestChoiceCountKey.Int(val)
+}
+
+// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the
+// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+// encoding formats requested in an embeddings operation, if specified.
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+	return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+	return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+	return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+	return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+	return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the seed for the
+// GenAI request; requests with the same seed value are more likely to return
+// the same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+	return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+	return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+	return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+	return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+	return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. 
It represents the array +// of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAIResponseFinishReasonsKey.StringSlice(val) +} + +// GenAIResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique identifier +// for the completion. +func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). 
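+//
+// Example (editor's illustrative sketch, not part of the generated
+// conventions): a chat instrumentation might record token usage on its span
+// with the helpers above, where `span` is assumed to be a started
+// go.opentelemetry.io/otel/trace Span and the counts come from the
+// provider's response (the documented example values are used here):
+//
+//	span.SetAttributes(
+//		GenAIUsageInputTokens(100),
+//		GenAIUsageOutputTokens(180),
+//	)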
+func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.provider.name +var ( + // [OpenAI] + // Stability: development + // + // [OpenAI]: https://openai.com/ + GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") + // [Vertex AI] + // Stability: development + // + // [Vertex AI]: https://cloud.google.com/vertex-ai + GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") + // [Gemini] + // Stability: development + // + // [Gemini]: https://cloud.google.com/products/gemini + GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") + // [Anthropic] + // Stability: development + // + // [Anthropic]: https://www.anthropic.com/ + GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") + // [Cohere] + // Stability: development + // + // [Cohere]: https://cohere.com/ + GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") + // [Azure OpenAI] + // Stability: development + // + // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ + GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") + // [IBM Watsonx AI] + // Stability: development + // + // [IBM Watsonx AI]: 
https://www.ibm.com/products/watsonx-ai + GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai") + // [AWS Bedrock] + // Stability: development + // + // [AWS Bedrock]: https://aws.amazon.com/bedrock + GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock") + // [Perplexity] + // Stability: development + // + // [Perplexity]: https://www.perplexity.ai/ + GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity") + // [xAI] + // Stability: development + // + // [xAI]: https://x.ai/ + GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai") + // [DeepSeek] + // Stability: development + // + // [DeepSeek]: https://www.deepseek.com/ + GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek") + // [Groq] + // Stability: development + // + // [Groq]: https://groq.com/ + GenAIProviderNameGroq = GenAIProviderNameKey.String("groq") + // [Mistral AI] + // Stability: development + // + // [Mistral AI]: https://mistral.ai/ + GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Output tokens (completion, response, etc.) + // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. 
It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). 
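+//
+// Example (editor's illustrative sketch, not part of the generated
+// conventions): the helpers in this namespace are typically combined on a
+// single span or log record, here with the documented example values:
+//
+//	attrs := []attribute.KeyValue{
+//		GeoCountryISOCode("CA"),
+//		GeoRegionISOCode("CA-QC"),
+//		GeoLocalityName("Montreal"),
+//		GeoLocationLat(45.505918),
+//		GeoLocationLon(-73.61483),
+//	}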
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
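+//
+// Example (editor's illustrative sketch, not part of the generated
+// conventions): a GraphQL server instrumentation might combine this helper
+// with the operation-type enum defined below, where `span` is assumed to be
+// the span covering the execution:
+//
+//	span.SetAttributes(
+//		GraphQLOperationName("findBookById"),
+//		GraphQLOperationTypeQuery,
+//		GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
+//	)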
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
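+//
+// Example (editor's illustrative sketch, not part of the generated
+// conventions): CPU details are usually reported together as a group of
+// attributes, here with the documented example values:
+//
+//	attrs := []attribute.KeyValue{
+//		HostCPUVendorID("GenuineIntel"),
+//		HostCPUFamily("6"),
+//		HostCPUModelID("6"),
+//		HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"),
+//		HostCPUStepping("1"),
+//	}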
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. 
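+//
+// Example (editor's illustrative sketch, not part of the generated
+// conventions): host identity attributes typically land on a resource, e.g.
+// via go.opentelemetry.io/otel/sdk/resource; the SchemaURL constant is
+// assumed to be exported by this package, as in upstream semconv packages:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		HostID("fdbf79e8af94cb7f9e8df36789187052"),
+//		HostName("opentelemetry-test"),
+//		HostType("n1-standard-1"),
+//		HostArchAMD64,
+//	)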
+func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110], + // the PATCH method defined in [RFC5789] + // and the QUERY method defined in [httpbis-safe-method-w-body]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. 
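+	//
+	// For example (editor's illustrative sketch, not a normative part of the
+	// convention), an instrumentation might normalize the method as follows,
+	// where `method` is the raw value from the request line:
+	//
+	//	var attrs []attribute.KeyValue
+	//	switch method {
+	//	case "GET", "HEAD", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS",
+	//		"TRACE", "PATCH", "QUERY":
+	//		attrs = append(attrs, HTTPRequestMethodKey.String(method))
+	//	default:
+	//		attrs = append(attrs,
+	//			HTTPRequestMethodOther,
+	//			HTTPRequestMethodOriginal(method))
+	//	}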
+	//
+	// [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods
+	// [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html
+	// [httpbis-safe-method-w-body]: https://datatracker.ietf.org/doc/draft-ietf-httpbis-safe-method-w-body/?include_text=1
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+	// "http.request.method_original" semantic conventions. It represents the
+	// original HTTP method sent by the client in the request line.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "GeT", "ACL", "foo"
+	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+	// HTTPRequestResendCountKey is the attribute Key conforming to the
+	// "http.request.resend_count" semantic conventions. It represents the ordinal
+	// number of the request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of the cause of the resending (e.g.
+	// redirection, authorization failure, 503 Server Unavailable, network issues,
+	// or any other).
+	HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+	// HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size"
+	// semantic conventions. It represents the total size of the request in bytes.
+	// This should be the total number of bytes sent over the wire, including the
+	// request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request
+	// body if any.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+	// HTTPResponseBodySizeKey is the attribute Key conforming to the
+	// "http.response.body.size" semantic conventions. It represents the size of the
+	// response payload body in bytes. This is the number of bytes transferred
+	// excluding headers and is often, but not always, present as the
+	// [Content-Length] header. For requests using transport encoding, this should
+	// be the compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+	// HTTPResponseSizeKey is the attribute Key conforming to the
+	// "http.response.size" semantic conventions. It represents the total size of
+	// the response in bytes. This should be the total number of bytes sent over the
+	// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+	// headers, and response body and trailers if any.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status code].
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: 200
+	//
+	// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route" semantic
+	// conventions. It represents the matched route template for the request. This
+	// MUST be low-cardinality and include all static path segments, with dynamic
+	// path segments represented with placeholders.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "/users/:userID?", "my-controller/my-action/{id?}"
+	// Note: MUST NOT be populated when this is not supported by the HTTP server
+	// framework as the route attribute should have low-cardinality and the URI path
+	// can NOT substitute it.
+	// SHOULD include the [application root] if there is one.
+	//
+	// A static path segment is a part of the route template with a fixed,
+	// low-cardinality value. This includes literal strings like `/users/` and
+	// placeholders that
+	// are constrained to a finite, predefined set of values, e.g. `{controller}` or
+	// `{action}`.
+	//
+	// A dynamic path segment is a placeholder for a value that can have high
+	// cardinality and is not constrained to a predefined list like static path
+	// segments.
+	//
+	// Instrumentations SHOULD use routing information provided by the corresponding
+	// web framework. They SHOULD pick the most precise source of routing
+	// information and MAY
+	// support custom route formatting. Instrumentations SHOULD document the format
+	// and the API used to obtain the route string.
+	//
+	// [application root]: /docs/http/http-spans.md#http-server-definitions
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+	return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("http.request.header."+key, val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+	return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including redirects).
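+//
+// Example (editor's illustrative sketch, not part of the generated
+// conventions): an HTTP client instrumentation might set this on the span of
+// a retried request, where `attempt` is the zero-based send counter the
+// client maintains and `span` is the span of the current attempt:
+//
+//	if attempt > 0 {
+//		span.SetAttributes(HTTPRequestResendCount(attempt))
+//	}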
+func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of the +// request in bytes. This should be the total number of bytes sent over the wire, +// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, +// and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of the +// response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseHeader returns an attribute KeyValue conforming to the +// "http.response.header" semantic conventions. It represents the HTTP response +// headers, `<key>` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.response.header."+key, val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. It represents the total size of the +// response in bytes. This should be the total number of bytes sent over the +// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route template for the +// request. This MUST be low-cardinality and include all static path segments, +// with dynamic path segments represented with placeholders. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. + // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method.
+ // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // QUERY method. + // Stability: development + HTTPRequestMethodQuery = HTTPRequestMethodKey.String("QUERY") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwBatteryCapacityKey is the attribute Key conforming to the + // "hw.battery.capacity" semantic conventions. It represents the design capacity + // in Watt-hours or Ampere-hours. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9.3Ah", "50Wh" + HwBatteryCapacityKey = attribute.Key("hw.battery.capacity") + + // HwBatteryChemistryKey is the attribute Key conforming to the + // "hw.battery.chemistry" semantic conventions. It represents the battery + // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Li-ion", "NiMH" + // + // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html + HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") + + // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" + // semantic conventions. It represents the current state of the battery. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwBatteryStateKey = attribute.Key("hw.battery.state") + + // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" + // semantic conventions. It represents the BIOS version of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + HwBiosVersionKey = attribute.Key("hw.bios_version") + + // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" + // semantic conventions. It represents the driver version for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.2.1-3" + HwDriverVersionKey = attribute.Key("hw.driver_version") + + // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" + // semantic conventions. It represents the type of the enclosure (useful for + // modular systems). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Computer", "Storage", "Switch" + HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") + + // HwFirmwareVersionKey is the attribute Key conforming to the + // "hw.firmware_version" semantic conventions. It represents the firmware + // version of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0.1" + HwFirmwareVersionKey = attribute.Key("hw.firmware_version") + + // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic + // conventions.
It represents the type of task the GPU is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwGpuTaskKey = attribute.Key("hw.gpu.task") + + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" + // semantic conventions. It represents the type of limit for hardware + // components. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLimitTypeKey = attribute.Key("hw.limit_type") + + // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the + // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID + // Level of the logical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "RAID0+1", "RAID5", "RAID10" + HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") + + // HwLogicalDiskStateKey is the attribute Key conforming to the + // "hw.logical_disk.state" semantic conventions. It represents the state of the + // logical disk space usage. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") + + // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" + // semantic conventions. It represents the type of the memory module. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "DDR4", "DDR5", "LPDDR5" + HwMemoryTypeKey = attribute.Key("hw.memory.type") + + // HwModelKey is the attribute Key conforming to the "hw.model" semantic + // conventions. It represents the descriptive model name of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" + HwModelKey = attribute.Key("hw.model") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwNetworkLogicalAddressesKey is the attribute Key conforming to the + // "hw.network.logical_addresses" semantic conventions. It represents the + // logical addresses of the adapter (e.g. IP address, or WWPN). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "172.16.8.21", "57.11.193.42" + HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") + + // HwNetworkPhysicalAddressKey is the attribute Key conforming to the + // "hw.network.physical_address" semantic conventions. It represents the + // physical address of the adapter (e.g. MAC address, or WWNN). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00-90-F5-E9-7B-36" + HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the + // "hw.physical_disk.smart_attribute" semantic conventions. It represents the + // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute + // of the physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" + // + // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. + HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") + + // HwPhysicalDiskStateKey is the attribute Key conforming to the + // "hw.physical_disk.state" semantic conventions. It represents the state of the + // physical disk endurance utilization. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") + + // HwPhysicalDiskTypeKey is the attribute Key conforming to the + // "hw.physical_disk.type" semantic conventions. It represents the type of the + // physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "HDD", "SSD", "10K" + HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") + + // HwSensorLocationKey is the attribute Key conforming to the + // "hw.sensor_location" semantic conventions. It represents the location of the + // sensor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 + // V3_3", "MAIN_12V", "CPU_VCORE" + HwSensorLocationKey = attribute.Key("hw.sensor_location") + + // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" + // semantic conventions. It represents the serial number of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CNFCP0123456789" + HwSerialNumberKey = attribute.Key("hw.serial_number") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTapeDriveOperationTypeKey is the attribute Key conforming to the + // "hw.tape_drive.operation_type" semantic conventions. It represents the type + // of tape drive operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: Describes the category of the hardware component for which `hw.state` + // is being reported. For example, `hw.type=temperature` along with + // `hw.state=degraded` would indicate that the temperature of the hardware + // component has been reported as `degraded`. + HwTypeKey = attribute.Key("hw.type") + + // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic + // conventions. It represents the vendor name of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" + HwVendorKey = attribute.Key("hw.vendor") +) + +// HwBatteryCapacity returns an attribute KeyValue conforming to the +// "hw.battery.capacity" semantic conventions. It represents the design capacity +// in Watt-hours or Ampere-hours. +func HwBatteryCapacity(val string) attribute.KeyValue { + return HwBatteryCapacityKey.String(val) +} + +// HwBatteryChemistry returns an attribute KeyValue conforming to the +// "hw.battery.chemistry" semantic conventions. It represents the battery +// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. +// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func HwBatteryChemistry(val string) attribute.KeyValue { + return HwBatteryChemistryKey.String(val) +} + +// HwBiosVersion returns an attribute KeyValue conforming to the +// "hw.bios_version" semantic conventions. It represents the BIOS version of the +// hardware component. +func HwBiosVersion(val string) attribute.KeyValue { + return HwBiosVersionKey.String(val) +} + +// HwDriverVersion returns an attribute KeyValue conforming to the +// "hw.driver_version" semantic conventions. It represents the driver version for +// the hardware component. +func HwDriverVersion(val string) attribute.KeyValue { + return HwDriverVersionKey.String(val) +} + +// HwEnclosureType returns an attribute KeyValue conforming to the +// "hw.enclosure.type" semantic conventions. It represents the type of the +// enclosure (useful for modular systems). +func HwEnclosureType(val string) attribute.KeyValue { + return HwEnclosureTypeKey.String(val) +} + +// HwFirmwareVersion returns an attribute KeyValue conforming to the +// "hw.firmware_version" semantic conventions. It represents the firmware version +// of the hardware component. +func HwFirmwareVersion(val string) attribute.KeyValue { + return HwFirmwareVersionKey.String(val) +} + +// HwID returns an attribute KeyValue conforming to the "hw.id" semantic +// conventions. It represents an identifier for the hardware component, unique +// within the monitored host. +func HwID(val string) attribute.KeyValue { + return HwIDKey.String(val) +} + +// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the +// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID +// Level of the logical disk. +func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { + return HwLogicalDiskRaidLevelKey.String(val) +} + +// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" +// semantic conventions. It represents the type of the memory module. +func HwMemoryType(val string) attribute.KeyValue { + return HwMemoryTypeKey.String(val) +} + +// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic +// conventions.
It represents the descriptive model name of the hardware +// component. +func HwModel(val string) attribute.KeyValue { + return HwModelKey.String(val) +} + +// HwName returns an attribute KeyValue conforming to the "hw.name" semantic +// conventions. It represents an easily-recognizable name for the hardware +// component. +func HwName(val string) attribute.KeyValue { + return HwNameKey.String(val) +} + +// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the +// "hw.network.logical_addresses" semantic conventions. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return HwNetworkLogicalAddressesKey.StringSlice(val) +} + +// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the +// "hw.network.physical_address" semantic conventions. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func HwNetworkPhysicalAddress(val string) attribute.KeyValue { + return HwNetworkPhysicalAddressKey.String(val) +} + +// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic +// conventions. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func HwParent(val string) attribute.KeyValue { + return HwParentKey.String(val) +} + +// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the +// "hw.physical_disk.smart_attribute" semantic conventions. It represents the +// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute +// of the physical disk. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { + return HwPhysicalDiskSmartAttributeKey.String(val) +} + +// HwPhysicalDiskType returns an attribute KeyValue conforming to the +// "hw.physical_disk.type" semantic conventions. It represents the type of the +// physical disk. +func HwPhysicalDiskType(val string) attribute.KeyValue { + return HwPhysicalDiskTypeKey.String(val) +} + +// HwSensorLocation returns an attribute KeyValue conforming to the +// "hw.sensor_location" semantic conventions. It represents the location of the +// sensor. +func HwSensorLocation(val string) attribute.KeyValue { + return HwSensorLocationKey.String(val) +} + +// HwSerialNumber returns an attribute KeyValue conforming to the +// "hw.serial_number" semantic conventions. It represents the serial number of +// the hardware component. +func HwSerialNumber(val string) attribute.KeyValue { + return HwSerialNumberKey.String(val) +} + +// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic +// conventions. It represents the vendor name of the hardware component. 
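+//
+// Editorial sketch (not part of the generated definitions): hardware metrics
+// usually carry a small, fixed set of these attributes identifying the
+// component being reported on. Assuming this package is imported as semconv:
+//
+//	// Identity of a single fan, reused for every measurement about it.
+//	fan := attribute.NewSet(
+//		semconv.HwID("fan_1"),
+//		semconv.HwName("CPU fan"),
+//		semconv.HwVendor("Dell"),
+//		semconv.HwTypeFan, // enum value for hw.type
+//	)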
+func HwVendor(val string) attribute.KeyValue { + return HwVendorKey.String(val) +} + +// Enum values for hw.battery.state +var ( + // Charging + // Stability: development + HwBatteryStateCharging = HwBatteryStateKey.String("charging") + // Discharging + // Stability: development + HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") +) + +// Enum values for hw.gpu.task +var ( + // Decoder + // Stability: development + HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") + // Encoder + // Stability: development + HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") + // General + // Stability: development + HwGpuTaskGeneral = HwGpuTaskKey.String("general") +) + +// Enum values for hw.limit_type +var ( + // Critical + // Stability: development + HwLimitTypeCritical = HwLimitTypeKey.String("critical") + // Degraded + // Stability: development + HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") + // High Critical + // Stability: development + HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") + // High Degraded + // Stability: development + HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") + // Low Critical + // Stability: development + HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") + // Low Degraded + // Stability: development + HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") + // Maximum + // Stability: development + HwLimitTypeMax = HwLimitTypeKey.String("max") + // Throttled + // Stability: development + HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") + // Turbo + // Stability: development + HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") +) + +// Enum values for hw.logical_disk.state +var ( + // Used + // Stability: development + HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") + // Free + // Stability: development + HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") +) + +// Enum values for hw.physical_disk.state +var ( + // Remaining + // Stability: development + HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining") +) + +// Enum values for hw.state +var ( + // Degraded + // Stability: development + HwStateDegraded = HwStateKey.String("degraded") + // Failed + // Stability: development + HwStateFailed = HwStateKey.String("failed") + // Needs Cleaning + // Stability: development + HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") + // OK + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Predicted Failure + // Stability: development + HwStatePredictedFailure = HwStateKey.String("predicted_failure") +) + +// Enum values for hw.tape_drive.operation_type +var ( + // Mount + // Stability: development + HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") + // Unmount + // Stability: development + HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") + // Clean + // Stability: development + HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") +) + +// Enum values for hw.type +var ( + // Battery + // Stability: development + HwTypeBattery = HwTypeKey.String("battery") + // CPU + // Stability: development + HwTypeCPU = HwTypeKey.String("cpu") + // Disk controller + // Stability: development + HwTypeDiskController = HwTypeKey.String("disk_controller") + // Enclosure + // Stability: development + HwTypeEnclosure = HwTypeKey.String("enclosure") + // Fan + // Stability: development + HwTypeFan = HwTypeKey.String("fan") + // GPU + // Stability: development + HwTypeGpu = 
HwTypeKey.String("gpu") + // Logical disk + // Stability: development + HwTypeLogicalDisk = HwTypeKey.String("logical_disk") + // Memory + // Stability: development + HwTypeMemory = HwTypeKey.String("memory") + // Network + // Stability: development + HwTypeNetwork = HwTypeKey.String("network") + // Physical disk + // Stability: development + HwTypePhysicalDisk = HwTypeKey.String("physical_disk") + // Power supply + // Stability: development + HwTypePowerSupply = HwTypeKey.String("power_supply") + // Tape drive + // Stability: development + HwTypeTapeDrive = HwTypeKey.String("tape_drive") + // Temperature + // Stability: development + HwTypeTemperature = HwTypeKey.String("temperature") + // Voltage + // Stability: development + HwTypeVoltage = HwTypeKey.String("voltage") +) + +// Namespace: ios +const ( + // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" + // semantic conventions. It represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The iOS lifecycle states are defined in the + // [UIApplicationDelegate documentation], and from which the `OS terminology` + // column values are derived. + // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`. + // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. + // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale.
+ // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SContainerStatusReasonKey is the attribute Key conforming to the + // "k8s.container.status.reason" semantic conventions. It represents the reason + // for the container state. Corresponds to the `reason` field of the: + // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ContainerCreating", "CrashLoopBackOff", + // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", + // "OOMKilled", "Completed", "Error", "ContainerCannotRun" + // + // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core + // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core + K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") + + // K8SContainerStatusStateKey is the attribute Key conforming to the + // "k8s.container.status.state" semantic conventions. It represents the state of + // the container. [K8s ContainerState]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "terminated", "running", "waiting" + // + // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core + K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPAMetricTypeKey is the attribute Key conforming to the + // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric + // source for the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Resource", "ContainerResource" + // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. + K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the + // API version of the target resource to scale for the HorizontalPodAutoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "apps/v1", "autoscaling/v2" + // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA + // spec. + K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") + + // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Deployment", "StatefulSet" + // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") + + // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-deployment", "my-statefulset" + // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" + // semantic conventions. It represents the size (identifier) of the K8s huge + // page. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2Mi" + K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeConditionStatusKey is the attribute Key conforming to the + // "k8s.node.condition.status" semantic conventions. It represents the status of + // the condition, one of True, False, Unknown. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "true", "false", "unknown" + // Note: This attribute aligns with the `status` field of the + // [NodeCondition] + // + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") + + // K8SNodeConditionTypeKey is the attribute Key conforming to the + // "k8s.node.condition.type" semantic conventions. It represents the condition + // type of a K8s Node. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Ready", "DiskPressure" + // Note: K8s Node conditions as described + // by [K8s documentation]. + // + // This attribute aligns with the `type` field of the + // [NodeCondition] + // + // The set of possible values is not limited to those listed here. Managed + // Kubernetes environments, + // or custom controllers MAY introduce additional node condition types. + // When this occurs, the exact value as reported by the Kubernetes API SHOULD be + // used. + // + // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodStatusPhaseKey is the attribute Key conforming to the + // "k8s.pod.status.phase" semantic conventions. It represents the phase for the + // pod. Corresponds to the `phase` field of the: [K8s PodStatus]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Pending", "Running" + // + // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core + K8SPodStatusPhaseKey = attribute.Key("k8s.pod.status.phase") + + // K8SPodStatusReasonKey is the attribute Key conforming to the + // "k8s.pod.status.reason" semantic conventions. It represents the reason for + // the pod state. Corresponds to the `reason` field of the: [K8s PodStatus]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "NodeAffinity" + // + // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core + K8SPodStatusReasonKey = attribute.Key("k8s.pod.status.reason") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the + // "k8s.resourcequota.resource_name" semantic conventions. It represents the + // name of the K8s resource a resource quota defines. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "count/replicationcontrollers" + // Note: The value for this attribute can be either the full + // `count/<resource>[.<group>]` string (e.g., count/deployments.apps, + // count/pods), or, for certain core Kubernetes resources, just the resource + // name (e.g., pods, services, configmaps). Both forms are supported by + // Kubernetes for object count quotas. See + // [Kubernetes Resource Quotas documentation] for more details. + // + // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#quota-on-object-count + K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStorageclassNameKey is the attribute Key conforming to the + // "k8s.storageclass.name" semantic conventions. It represents the name of K8s + // [StorageClass] object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gold.storageclass.storage.k8s.io" + // + // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io + K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "emptyDir", "persistentVolumeClaim" + K8SVolumeTypeKey = attribute.Key("k8s.volume.type") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace.
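+//
+// Editorial sketch (not part of the generated definitions): cluster identity
+// is normally attached once, as resource attributes, rather than on every
+// signal. Assuming this package is imported as semconv, that it exports a
+// SchemaURL constant as other semconv packages do, and using
+// go.opentelemetry.io/otel/sdk/resource:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.K8SClusterName("opentelemetry-cluster"),
+//		semconv.K8SClusterUID("218fc5a9-a5f1-4b54-aa05-46717d0ab26d"),
+//	)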
+func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify a +// particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob +// annotation placed on the CronJob, the `<key>` being the annotation name, the +// value being the annotation value. +func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.annotation."+key, val) +} + +// K8SCronJobLabel returns an attribute KeyValue conforming to the +// "k8s.cronjob.label" semantic conventions. It represents the label placed on +// the CronJob, the `<key>` being the label name, the value being the label +// value. +func K8SCronJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.label."+key, val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.daemonset.annotation" semantic conventions. It represents the annotation +// placed on the DaemonSet, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.annotation."+key, val) +} + +// K8SDaemonSetLabel returns an attribute KeyValue conforming to the +// "k8s.daemonset.label" semantic conventions. It represents the label placed on +// the DaemonSet, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.label."+key, val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet.
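+//
+// Editorial note: the annotation and label helpers above are template
+// attributes; the caller-supplied key is appended to the attribute name. A
+// minimal illustration (assuming this package is imported as semconv):
+//
+//	kv := semconv.K8SDaemonSetLabel("app", "fluent-bit")
+//	// kv.Key is "k8s.daemonset.label.app"
+//	// kv.Value.AsString() is "fluent-bit"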
+func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the +// "k8s.deployment.annotation" semantic conventions. It represents the annotation +// placed on the Deployment, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.annotation."+key, val) +} + +// K8SDeploymentLabel returns an attribute KeyValue conforming to the +// "k8s.deployment.label" semantic conventions. It represents the label placed on +// the Deployment, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SDeploymentLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.label."+key, val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAMetricType returns an attribute KeyValue conforming to the +// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric +// source for the horizontal pod autoscaler. +func K8SHPAMetricType(val string) attribute.KeyValue { + return K8SHPAMetricTypeKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler. +func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the +// API version of the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue { + return K8SHPAScaletargetrefAPIVersionKey.String(val) +} + +// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefKind(val string) attribute.KeyValue { + return K8SHPAScaletargetrefKindKey.String(val) +} + +// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefName(val string) attribute.KeyValue { + return K8SHPAScaletargetrefNameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler.
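+//
+// Editorial sketch (not part of the generated definitions): the
+// scaletargetref attributes above are usually emitted together so a scaling
+// observation can be tied back to the workload it targets. The event name
+// here is hypothetical; assuming this package is imported as semconv and
+// trace is go.opentelemetry.io/otel/trace:
+//
+//	span.AddEvent("hpa.scaled", trace.WithAttributes(
+//		semconv.K8SHPAName("opentelemetry"),
+//		semconv.K8SHPAScaletargetrefKind("Deployment"),
+//		semconv.K8SHPAScaletargetrefName("my-deployment"),
+//	))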
+func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SHugepageSize returns an attribute KeyValue conforming to the +// "k8s.hugepage.size" semantic conventions. It represents the size (identifier) +// of the K8s huge page. +func K8SHugepageSize(val string) attribute.KeyValue { + return K8SHugepageSizeKey.String(val) +} + +// K8SJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.job.annotation" semantic conventions. It represents the annotation placed +// on the Job, the `<key>` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.annotation."+key, val) +} + +// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" +// semantic conventions. It represents the label placed on the Job, the `<key>` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.label."+key, val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the +// "k8s.namespace.annotation" semantic conventions. It represents the annotation +// placed on the Namespace, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.annotation."+key, val) +} + +// K8SNamespaceLabel returns an attribute KeyValue conforming to the +// "k8s.namespace.label" semantic conventions. It represents the label placed on +// the Namespace, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SNamespaceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.label."+key, val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeAnnotation returns an attribute KeyValue conforming to the +// "k8s.node.annotation" semantic conventions. It represents the annotation +// placed on the Node, the `<key>` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SNodeAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.annotation."+key, val) +} + +// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" +// semantic conventions. It represents the label placed on the Node, the `<key>` +// being the label name, the value being the label value, even if the value is +// empty.
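+//
+// Editorial sketch (not part of the generated definitions): node-scoped
+// telemetry commonly combines the node name and UID with labels added via
+// this template helper. Assuming this package is imported as semconv:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.K8SNodeName("node-1"),
+//		semconv.K8SNodeUID("1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2"),
+//		semconv.K8SNodeLabel("topology.kubernetes.io/zone", "us-east-1a"),
+//	}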
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.node.label."+key, val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
+// semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `<key>` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the `<key>`
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.pod.label."+key, val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// placed on the ReplicaSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label placed on
+// the ReplicaSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.replicaset.label."+key, val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
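Pod- and node-level helpers like the ones above are typically combined into a single resource describing where a workload runs, rather than set span by span. A small sketch, assuming the OpenTelemetry SDK resource package and a semconv version (both assumptions, not part of this diff):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version
)

func main() {
	// Values would normally come from the downward API or injected env vars;
	// everything below is illustrative.
	res := resource.NewSchemaless(
		semconv.K8SNamespaceName("payments"),
		semconv.K8SPodName("payments-7c9d8f54d-abcde"),
		semconv.K8SPodUID("b0aa5bf1-4e2c-4d6a-9e3f-1c2d3e4f5a6b"),
		semconv.K8SNodeName("node-1"),
	)
	fmt.Println(res.Attributes())
}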
+// K8SReplicationControllerName returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.name" semantic conventions. It represents the name
+// of the replication controller.
+func K8SReplicationControllerName(val string) attribute.KeyValue {
+	return K8SReplicationControllerNameKey.String(val)
+}
+
+// K8SReplicationControllerUID returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of
+// the replication controller.
+func K8SReplicationControllerUID(val string) attribute.KeyValue {
+	return K8SReplicationControllerUIDKey.String(val)
+}
+
+// K8SResourceQuotaName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.name" semantic conventions. It represents the name of the
+// resource quota.
+func K8SResourceQuotaName(val string) attribute.KeyValue {
+	return K8SResourceQuotaNameKey.String(val)
+}
+
+// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.resource_name" semantic conventions. It represents the name
+// of the K8s resource a resource quota defines.
+func K8SResourceQuotaResourceName(val string) attribute.KeyValue {
+	return K8SResourceQuotaResourceNameKey.String(val)
+}
+
+// K8SResourceQuotaUID returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+// resource quota.
+func K8SResourceQuotaUID(val string) attribute.KeyValue {
+	return K8SResourceQuotaUIDKey.String(val)
+}
+
+// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.statefulset.annotation" semantic conventions. It represents the
+// annotation placed on the StatefulSet, the `<key>` being the annotation name,
+// the value being the annotation value, even if the value is empty.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.statefulset.annotation."+key, val)
+}
+
+// K8SStatefulSetLabel returns an attribute KeyValue conforming to the
+// "k8s.statefulset.label" semantic conventions. It represents the label placed
+// on the StatefulSet, the `<key>` being the label name, the value being the
+// label value, even if the value is empty.
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.statefulset.label."+key, val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStorageclassName returns an attribute KeyValue conforming to the
+// "k8s.storageclass.name" semantic conventions. It represents the name of the
+// K8s [StorageClass] object.
+//
+// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+func K8SStorageclassName(val string) attribute.KeyValue {
+	return K8SStorageclassNameKey.String(val)
+}
+
+// K8SVolumeName returns an attribute KeyValue conforming to the
+// "k8s.volume.name" semantic conventions. It represents the name of the K8s
+// volume.
+func K8SVolumeName(val string) attribute.KeyValue {
+	return K8SVolumeNameKey.String(val)
+}
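The enum values that follow are ready-made attribute.KeyValue constants, so they drop straight into attribute lists; for example, a collector counting container restarts might tag a counter with a status reason. A hedged sketch (the meter and counter names are illustrative, not defined by this package; the semconv version is assumed):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version
)

func main() {
	meter := otel.Meter("example")
	restarts, err := meter.Int64Counter("k8s.container.restart.count") // illustrative name
	if err != nil {
		panic(err)
	}
	// Enum members are complete KeyValues; no .String(...) call is needed.
	restarts.Add(context.Background(), 1, metric.WithAttributes(
		semconv.K8SContainerStatusReasonCrashLoopBackOff,
	))
}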
+
+// Enum values for k8s.container.status.reason
+var (
+	// The container is being created.
+	// Stability: development
+	K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating")
+	// The container is in a crash loop back off state.
+	// Stability: development
+	K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff")
+	// There was an error creating the container configuration.
+	// Stability: development
+	K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError")
+	// There was an error pulling the container image.
+	// Stability: development
+	K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull")
+	// The container image pull is in back off state.
+	// Stability: development
+	K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff")
+	// The container was killed due to out of memory.
+	// Stability: development
+	K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled")
+	// The container has completed execution.
+	// Stability: development
+	K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed")
+	// There was an error with the container.
+	// Stability: development
+	K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error")
+	// The container cannot run.
+	// Stability: development
+	K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun")
+)
+
+// Enum values for k8s.container.status.state
+var (
+	// The container has terminated.
+	// Stability: development
+	K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated")
+	// The container is running.
+	// Stability: development
+	K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running")
+	// The container is waiting.
+ // Stability: development + K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") +) + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.node.condition.status +var ( + // condition_true + // Stability: development + K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") + // condition_false + // Stability: development + K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") + // condition_unknown + // Stability: development + K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") +) + +// Enum values for k8s.node.condition.type +var ( + // The node is healthy and ready to accept pods + // Stability: development + K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") + // Pressure exists on the disk size—that is, if the disk capacity is low + // Stability: development + K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") + // Pressure exists on the node memory—that is, if the node memory is low + // Stability: development + K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") + // Pressure exists on the processes—that is, if there are too many processes + // on the node + // Stability: development + K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") + // The network for the node is not correctly configured + // Stability: development + K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") +) + +// Enum values for k8s.pod.status.phase +var ( + // The pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as + // well as time spent pulling images onto the host. + // + // Stability: development + K8SPodStatusPhasePending = K8SPodStatusPhaseKey.String("Pending") + // The pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being + // restarted. + // + // Stability: development + K8SPodStatusPhaseRunning = K8SPodStatusPhaseKey.String("Running") + // All containers in the pod have voluntarily terminated with a container exit + // code of 0, and the system is not going to restart any of these containers. + // + // Stability: development + K8SPodStatusPhaseSucceeded = K8SPodStatusPhaseKey.String("Succeeded") + // All containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by + // the system). + // + // Stability: development + K8SPodStatusPhaseFailed = K8SPodStatusPhaseKey.String("Failed") + // For some reason the state of the pod could not be obtained, typically due to + // an error in communicating with the host of the pod. 
+ // + // Stability: development + K8SPodStatusPhaseUnknown = K8SPodStatusPhaseKey.String("Unknown") +) + +// Enum values for k8s.pod.status.reason +var ( + // The pod is evicted. + // Stability: development + K8SPodStatusReasonEvicted = K8SPodStatusReasonKey.String("Evicted") + // The pod is in a status because of its node affinity + // Stability: development + K8SPodStatusReasonNodeAffinity = K8SPodStatusReasonKey.String("NodeAffinity") + // The reason on a pod when its state cannot be confirmed as kubelet is + // unresponsive on the node it is (was) running. + // + // Stability: development + K8SPodStatusReasonNodeLost = K8SPodStatusReasonKey.String("NodeLost") + // The node is shutdown + // Stability: development + K8SPodStatusReasonShutdown = K8SPodStatusReasonKey.String("Shutdown") + // The pod was rejected admission to the node because of an error during + // admission that could not be categorized. + // + // Stability: development + K8SPodStatusReasonUnexpectedAdmissionError = K8SPodStatusReasonKey.String("UnexpectedAdmissionError") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab + // memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "audit.log"
+	LogFileNameKey = attribute.Key("log.file.name")
+
+	// LogFileNameResolvedKey is the attribute Key conforming to the
+	// "log.file.name_resolved" semantic conventions. It represents the basename of
+	// the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "uuid.log"
+	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
+	// semantic conventions. It represents the full path to the file.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/var/log/mysql/audit.log"
+	LogFilePathKey = attribute.Key("log.file.path")
+
+	// LogFilePathResolvedKey is the attribute Key conforming to the
+	// "log.file.path_resolved" semantic conventions. It represents the full path to
+	// the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/var/lib/docker/uuid.log"
+	LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic
+	// conventions. It represents the stream associated with the log. See below for
+	// a list of well-known values.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	LogIostreamKey = attribute.Key("log.iostream")
+
+	// LogRecordOriginalKey is the attribute Key conforming to the
+	// "log.record.original" semantic conventions. It represents the complete
+	// original Log Record.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - -
+	// Something happened", "[INFO] 8/3/24 12:34:56 Something happened"
+	// Note: This value MAY be added when processing a Log Record which was
+	// originally transmitted as a string or equivalent data type AND the Body field
+	// of the Log Record does not contain the same value. (e.g. a syslog or a log
+	// record read from a file.)
+	LogRecordOriginalKey = attribute.Key("log.record.original")
+
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log Record.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV"
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a
+	// [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+	// identifiers (e.g. UUID) may be used as needed.
+	//
+	// [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
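As a quick illustration of the file helpers just defined, this is how a file-tailing component might attach them to a record's attributes; a sketch only, with the semconv import version assumed:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.LogFileName("audit.log"),
		semconv.LogFilePath("/var/log/mysql/audit.log"),
		semconv.LogFileNameResolved("uuid.log"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}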
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+	return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+	// Logs from stdout stream
+	// Stability: development
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Events from stderr stream
+	// Stability: development
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Namespace: mainframe
+const (
+	// MainframeLparNameKey is the attribute Key conforming to the
+	// "mainframe.lpar.name" semantic conventions. It represents the name of the
+	// logical partition that hosts systems with a mainframe operating system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "LPAR01"
+	MainframeLparNameKey = attribute.Key("mainframe.lpar.name")
+)
+
+// MainframeLparName returns an attribute KeyValue conforming to the
+// "mainframe.lpar.name" semantic conventions. It represents the name of the
+// logical partition that hosts systems with a mainframe operating system.
+func MainframeLparName(val string) attribute.KeyValue {
+	return MainframeLparNameKey.String(val)
+}
+
+// Namespace: messaging
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the batching
+	// operation.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client library
+	// supports both batch and single-message API for the same operation,
+	// instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+	// and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client.id" semantic conventions. It represents a unique identifier
+	// for the client that consumes or produces a message.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. 
+	// An example would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used for
+	// grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might not
+	// exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+	// the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+	// represents the UTC epoch seconds at which the message has been accepted and
+	// stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+	// MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+	// the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+	// represents the ack deadline in seconds set for the modify ack deadline
+	// request.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+	// MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+	// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+	// ack id for a given message.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ack_id
+	MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+	// MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+	// It represents the delivery attempt for a given message.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+	// MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+	// the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+	// represents the ordering key for a given message. If the attribute is not
+	// present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ordering_key
+	MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the message
+	// keys in Kafka, which are used for grouping alike messages to ensure they're
+	// processed on the same partition. They differ from `messaging.message.id` in
+	// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myKey
+	// Note: If the key type is not string, its string representation has to be
+	// supplied for the attribute. If the key has no unambiguous, canonical string
+	// form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+	// boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+	// MessagingKafkaOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+	// record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the size of
+	// the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: This can refer to either the compressed or uncompressed body size. If
+	// both sizes are known, the uncompressed body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents the
+	// conversation ID identifying the conversation to which the message belongs,
+	// represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: MyConversationId
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents the
+	// size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: This can refer to either the compressed or uncompressed size. If both
+	// sizes are known, the uncompressed size should be used.
+	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used by
+	// the messaging system as an identifier for the message, represented as a
+	// string.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 452a7c7c7c7048c2f887f61572b18fc2
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingOperationNameKey is the attribute Key conforming to the
+	// "messaging.operation.name" semantic conventions. It represents the
+	// system-specific name of the messaging operation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "ack", "nack", "send"
+	MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+	// MessagingOperationTypeKey is the attribute Key conforming to the
+	// "messaging.operation.type" semantic conventions. It represents a string
+	// identifying the type of the messaging operation.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+	// MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+	// the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+	// represents the rabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myKey
+	MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+	// MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+	// "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+	// the rabbitMQ message delivery tag.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+	// MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+	// "messaging.rocketmq.consumption_model" semantic conventions. It represents
+	// the model of message consumption. This only applies to consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+	// represents the delay time level for a delay message, which determines the
+	// message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+	// to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+	// It represents the timestamp in milliseconds that the delay message is
+	// expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents the
+	// message group, which is essential for FIFO messages. Messages that belong to
+	// the same message group are always processed one by one within the same
+	// consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myMessageGroup
+	MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+	// key(s) of the message, another way to mark a message besides the message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "keyA", "keyB"
+	MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of message besides topic.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: tagA
+	MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents the
+	// type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources, resources in different namespaces are
+	// individual.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myNamespace
+	MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+	// the "messaging.servicebus.disposition_status" semantic conventions. It
+	// describes the [settlement type].
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+	MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+	// the "messaging.servicebus.message.delivery_count" semantic conventions. It
+	// represents the number of deliveries that have been attempted for this
+	// message.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+	// the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+	// represents the UTC epoch seconds at which the message has been accepted and
+	// stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+	// MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+	// semantic conventions. It represents the messaging system as identified by the
+	// client instrumentation.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: The actual messaging system may differ from the one known by the
+	// client. For example, when using Kafka client libraries to communicate with
+	// Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+	// instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. +func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. 
+// It represents the UTC epoch seconds at which the message has been accepted
+// and stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+	return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+	return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+	return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka, which are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+	return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+	return MessagingMessageBodySizeKey.Int(val)
+}
+
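Putting several of these helpers together, a producer-side instrumentation might annotate a span as below. This is a sketch under assumed imports; the tracer name, topic, and semconv version are illustrative, not part of this diff:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func send(ctx context.Context) {
	tracer := otel.Tracer("example")
	_, span := tracer.Start(ctx, "send orders",
		trace.WithSpanKind(trace.SpanKindProducer))
	defer span.End()

	span.SetAttributes(
		semconv.MessagingSystemKafka, // enum value, defined further below
		semconv.MessagingOperationName("send"),
		semconv.MessagingDestinationName("orders"),
		semconv.MessagingKafkaMessageKey("order-42"),
	)
	// produce the message here
}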
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+	return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the rabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+	return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the
+// message group, which is essential for FIFO messages. Messages that belong to
+// the same message group are always processed one by one within the same
+// consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message id.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketMQMessageKeysKey.StringSlice(val) +} + +// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketMQMessageTag(val string) attribute.KeyValue { + return MessagingRocketMQMessageTagKey.String(val) +} + +// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the +// "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketMQNamespace(val string) attribute.KeyValue { + return MessagingRocketMQNamespaceKey.String(val) +} + +// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. +func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServiceBusMessageDeliveryCountKey.Int(val) +} + +// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has been +// accepted and stored in the entity. +func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) +} + +// Enum values for messaging.operation.type +var ( + // A message is created. "Create" spans always refer to a single message and are + // used to provide a unique creation context for messages in batch sending + // scenarios. + // + // Stability: development + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are provided for sending to an intermediary. If a single + // message is sent, the context of the "Send" span can be used as the creation + // context and no "Create" span needs to be created. + // + // Stability: development + MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") + // One or more messages are requested by a consumer. This operation refers to + // pull-based scenarios, where consumers explicitly call methods of messaging + // SDKs to receive messages. + // + // Stability: development + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are processed by a consumer. + // + // Stability: development + MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") + // One or more messages are settled. 
+ // + // Stability: development + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Notification Service (SNS) + // Stability: development + MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. 
+	// It represents the ISO 3166-1 alpha-2 2-character country code associated
+	// with the mobile carrier network.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: DE
+	NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMCCKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+	// country code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 310
+	NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMNCKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+	// network code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 001
+	NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of the
+	// mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: sprint
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionStateKey is the attribute Key conforming to the
+	// "network.connection.state" semantic conventions. It represents the state of
+	// the network connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "close_wait"
+	// Note: Connection states are defined as part of the [rfc9293]
+	//
+	// [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+	NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more details
+	// regarding the connection.type. It may be the type of cell technology
+	// connection, but it could be used for describing details about a wifi
+	// connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: LTE
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the internet
+	// connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: wifi
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkInterfaceNameKey is the attribute Key conforming to the
+	// "network.interface.name" semantic conventions. It represents the network
+	// interface name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "lo", "eth0"
+	NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+	// NetworkIODirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network IO
+	// operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "transmit"
+	NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions.
It represents the local address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. + // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. 
It represents the [OSI network layer] or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. 
It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + // Stability: development + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + // Stability: development + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + // Stability: development + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + // Stability: development + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + // Stability: development + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + // Stability: development + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + // Stability: development + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + // Stability: development + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + // Stability: development + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + // Stability: development + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// Enum values for network.connection.type +var ( + // wifi + // Stability: development + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + // Stability: development + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + // Stability: development + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + // Stability: development + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + // Stability: development + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +// Enum values for network.io.direction +var ( + // transmit + // Stability: development + NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") + // receive + // Stability: development + NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") +) + +// Enum values for network.transport +var ( + // TCP + // Stability: stable + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + // Stability: stable + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. + // Stability: stable + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + // Stability: stable + NetworkTransportUnix = NetworkTransportKey.String("unix") + // QUIC + // Stability: stable + NetworkTransportQUIC = NetworkTransportKey.String("quic") +) + +// Enum values for network.type +var ( + // IPv4 + // Stability: stable + NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") + // IPv6 + // Stability: stable + NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") +) + +// Namespace: nfs +const ( + // NfsOperationNameKey is the attribute Key conforming to the + // "nfs.operation.name" semantic conventions. It represents the NFSv4+ operation + // name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OPEN", "READ", "GETATTR" + NfsOperationNameKey = attribute.Key("nfs.operation.name") + + // NfsServerRepcacheStatusKey is the attribute Key conforming to the + // "nfs.server.repcache.status" semantic conventions. It represents, on Linux, + // one of "hit" (NFSD_STATS_RC_HITS), "miss" (NFSD_STATS_RC_MISSES), or + // "nocache" (NFSD_STATS_RC_NOCACHE -- uncacheable).
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: hit + NfsServerRepcacheStatusKey = attribute.Key("nfs.server.repcache.status") +) + +// NfsOperationName returns an attribute KeyValue conforming to the +// "nfs.operation.name" semantic conventions. It represents the NFSv4+ operation +// name. +func NfsOperationName(val string) attribute.KeyValue { + return NfsOperationNameKey.String(val) +} + +// NfsServerRepcacheStatus returns an attribute KeyValue conforming to the +// "nfs.server.repcache.status" semantic conventions. It represents, on Linux, +// one of "hit" (NFSD_STATS_RC_HITS), "miss" (NFSD_STATS_RC_MISSES), or "nocache" +// (NFSD_STATS_RC_NOCACHE -- uncacheable). +func NfsServerRepcacheStatus(val string) attribute.KeyValue { + return NfsServerRepcacheStatusKey.String(val) +} + +// Namespace: oci +const ( + // OCIManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of the + // OCI image manifest. For container images specifically, this is the digest by + // which the container image is known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" + // Note: Follows [OCI Image Manifest Specification], and specifically the + // [Digest property]. + // An example can be found in [Example Image Manifest]. + // + // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md + // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests + // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest + OCIManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OCIManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically, this is the digest by +// which the container image is known. +func OCIManifestDigest(val string) attribute.KeyValue { + return OCIManifestDigestKey.String(val) +} + +// Namespace: onc_rpc +const ( + // OncRPCProcedureNameKey is the attribute Key conforming to the + // "onc_rpc.procedure.name" semantic conventions. It represents the ONC/Sun RPC + // procedure name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OPEN", "READ", "GETATTR" + OncRPCProcedureNameKey = attribute.Key("onc_rpc.procedure.name") + + // OncRPCProcedureNumberKey is the attribute Key conforming to the + // "onc_rpc.procedure.number" semantic conventions. It represents the ONC/Sun + // RPC procedure number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OncRPCProcedureNumberKey = attribute.Key("onc_rpc.procedure.number") + + // OncRPCProgramNameKey is the attribute Key conforming to the + // "onc_rpc.program.name" semantic conventions. It represents the ONC/Sun RPC + // program name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "portmapper", "nfs" + OncRPCProgramNameKey = attribute.Key("onc_rpc.program.name") +
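+ // Illustrative usage sketch (not part of the generated conventions): an
+ // NFSv4 READ could be described with the NFS and ONC/Sun RPC attributes
+ // from these namespaces. The span variable and the concrete values are
+ // assumptions for the example.
+ //
+ //	span.SetAttributes(
+ //		NfsOperationName("READ"),
+ //		OncRPCProgramName("nfs"),
+ //		OncRPCVersion(4),
+ //		OncRPCProcedureName("READ"),
+ //	)
+
+ // OncRPCVersionKey is the attribute Key conforming to the "onc_rpc.version" + // semantic conventions. It represents the ONC/Sun RPC program version.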
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OncRPCVersionKey = attribute.Key("onc_rpc.version") +) + +// OncRPCProcedureName returns an attribute KeyValue conforming to the +// "onc_rpc.procedure.name" semantic conventions. It represents the ONC/Sun RPC +// procedure name. +func OncRPCProcedureName(val string) attribute.KeyValue { + return OncRPCProcedureNameKey.String(val) +} + +// OncRPCProcedureNumber returns an attribute KeyValue conforming to the +// "onc_rpc.procedure.number" semantic conventions. It represents the ONC/Sun RPC +// procedure number. +func OncRPCProcedureNumber(val int) attribute.KeyValue { + return OncRPCProcedureNumberKey.Int(val) +} + +// OncRPCProgramName returns an attribute KeyValue conforming to the +// "onc_rpc.program.name" semantic conventions. It represents the ONC/Sun RPC +// program name. +func OncRPCProgramName(val string) attribute.KeyValue { + return OncRPCProgramNameKey.String(val) +} + +// OncRPCVersion returns an attribute KeyValue conforming to the +// "onc_rpc.version" semantic conventions. It represents the ONC/Sun RPC program +// version. +func OncRPCVersion(val int) attribute.KeyValue { + return OncRPCVersionKey.Int(val) +} + +// Namespace: openai +const ( + // OpenAIRequestServiceTierKey is the attribute Key conforming to the + // "openai.request.service_tier" semantic conventions. It represents the service + // tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") + + // OpenAIResponseServiceTierKey is the attribute Key conforming to the + // "openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") + + // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the + // "openai.response.system_fingerprint" semantic conventions. It represents a + // fingerprint to track any eventual change in the Generative AI environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") +) + +// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "openai.response.service_tier" semantic conventions. It represents the service +// tier used for the response. +func OpenAIResponseServiceTier(val string) attribute.KeyValue { + return OpenAIResponseServiceTierKey.String(val) +} + +// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to +// the "openai.response.system_fingerprint" semantic conventions. It represents a +// fingerprint to track any eventual change in the Generative AI environment. +func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return OpenAIResponseSystemFingerprintKey.String(val) +} + +// Enum values for openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. 
+ // Stability: development + OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") +) + +// Namespace: openshift +const ( + // OpenShiftClusterquotaNameKey is the attribute Key conforming to the + // "openshift.clusterquota.name" semantic conventions. It represents the name of + // the cluster quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + OpenShiftClusterquotaNameKey = attribute.Key("openshift.clusterquota.name") + + // OpenShiftClusterquotaUIDKey is the attribute Key conforming to the + // "openshift.clusterquota.uid" semantic conventions. It represents the UID of + // the cluster quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + OpenShiftClusterquotaUIDKey = attribute.Key("openshift.clusterquota.uid") +) + +// OpenShiftClusterquotaName returns an attribute KeyValue conforming to the +// "openshift.clusterquota.name" semantic conventions. It represents the name of +// the cluster quota. +func OpenShiftClusterquotaName(val string) attribute.KeyValue { + return OpenShiftClusterquotaNameKey.String(val) +} + +// OpenShiftClusterquotaUID returns an attribute KeyValue conforming to the +// "openshift.clusterquota.uid" semantic conventions. It represents the UID of +// the cluster quota. +func OpenShiftClusterquotaUID(val string) attribute.KeyValue { + return OpenShiftClusterquotaUIDKey.String(val) +} + +// Namespace: opentracing +const ( + // OpenTracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the parent-child + // Reference type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The causal relationship between a child Span and a parent Span. + OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +// Enum values for opentracing.ref_type +var ( + // The parent Span depends on the child Span in some capacity + // Stability: development + OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + // Stability: development + OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") +) + +// Namespace: os +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TQ3C.230805.001.B2", "20E247", "22621" + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to be + // parsed) OS version information, as reported by, for example, the `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" semantic + // conventions. It represents the version string of the operating system as + // defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + OSVersionKey = attribute.Key("os.version") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the "os.description" +// semantic conventions. It represents the human readable (not intended to be +// parsed) OS version information, as reported by, for example, the `ver` or +// `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating system +// as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Enum values for os.type +var ( + // Microsoft Windows + // Stability: development + OSTypeWindows = OSTypeKey.String("windows") + // Linux + // Stability: development + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + // Stability: development + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + // Stability: development + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + // Stability: development + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + // Stability: development + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + // Stability: development + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + // Stability: development + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + // Stability: development + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + // Stability: development + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + // Stability: development + OSTypeZOS = OSTypeKey.String("zos") +) +
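+// Illustrative sketch (not part of the generated conventions): mapping
+// runtime.GOOS onto the os.type enum values above. The helper name and the
+// fallback behavior are assumptions for the example; only a few common GOOS
+// values are covered.
+//
+//	func osTypeFromGOOS(goos string) (attribute.KeyValue, bool) {
+//		switch goos {
+//		case "windows":
+//			return OSTypeWindows, true
+//		case "linux":
+//			return OSTypeLinux, true
+//		case "darwin":
+//			return OSTypeDarwin, true
+//		case "freebsd":
+//			return OSTypeFreeBSD, true
+//		default:
+//			return attribute.KeyValue{}, false
+//		}
+//	}
+
+// Namespace: otel +const ( + // OTelComponentNameKey is the attribute Key conforming to the + // "otel.component.name" semantic conventions. It represents a name uniquely + // identifying the instance of the OpenTelemetry component within its containing + // SDK instance.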
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otlp_grpc_span_exporter/0", "custom-name" + // Note: Implementations SHOULD ensure a low cardinality for this attribute, + // even across application or SDK restarts. + // E.g. implementations MUST NOT use UUIDs as values for this attribute. + // + // Implementations MAY achieve these goals by following a + // `<otel.component.type>/<instance-counter>` pattern, e.g. + // `batching_span_processor/0`. + // Hereby `otel.component.type` refers to the corresponding attribute value of + // the component. + // + // The value of `instance-counter` MAY be automatically assigned by the + // component and uniqueness within the enclosing SDK instance MUST be + // guaranteed. + // For example, `<instance-counter>` MAY be implemented by using a monotonically + // increasing counter (starting with `0`), which is incremented every time an + // instance of the given component type is started. + // + // With this implementation, for example the first Batching Span Processor would + // have `batching_span_processor/0` + // as `otel.component.name`, the second one `batching_span_processor/1` and so + // on. + // These values will therefore be reused in the case of an application restart. + OTelComponentNameKey = attribute.Key("otel.component.name") + + // OTelComponentTypeKey is the attribute Key conforming to the + // "otel.component.type" semantic conventions. It represents a name identifying + // the type of the OpenTelemetry component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "batching_span_processor", "com.example.MySpanExporter" + // Note: If none of the standardized values apply, implementations SHOULD use + // the language-defined name of the type. + // E.g. for Java the fully qualified classname SHOULD be used in this case. + OTelComponentTypeKey = attribute.Key("otel.component.type") + + // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" + // semantic conventions. It represents the name of the instrumentation scope - ( + // `InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "io.opentelemetry.contrib.mongodb" + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeSchemaURLKey is the attribute Key conforming to the + // "otel.scope.schema_url" semantic conventions. It represents the schema URL of + // the instrumentation scope. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://opentelemetry.io/schemas/1.31.0" + OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of the + // instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.0.0" + OTelScopeVersionKey = attribute.Key("otel.scope.version") +
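+ // Illustrative sketch (not part of the generated conventions): one way to
+ // realize the `<otel.component.type>/<instance-counter>` naming pattern
+ // described in the otel.component.name note above, using a per-type
+ // monotonic counter. The componentName helper and its counter map are
+ // assumptions for the example.
+ //
+ //	var (
+ //		nameMu   sync.Mutex
+ //		counters = map[string]int{}
+ //	)
+ //
+ //	func componentName(componentType string) string {
+ //		nameMu.Lock()
+ //		defer nameMu.Unlock()
+ //		n := counters[componentType]
+ //		counters[componentType]++
+ //		return fmt.Sprintf("%s/%d", componentType, n)
+ //	}
+
+ // OTelSpanParentOriginKey is the attribute Key conforming to the + // "otel.span.parent.origin" semantic conventions. It represents whether + // the span has a parent span, and if so, + // [whether it is a remote parent].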
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") + + // OTelSpanSamplingResultKey is the attribute Key conforming to the + // "otel.span.sampling_result" semantic conventions. It represents the result + // value of the sampler for this span. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") + + // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" + // semantic conventions. It represents the name of the code, either "OK" or + // "ERROR". MUST NOT be set if the status code is UNSET. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the description + // of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "resource not found" + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +// OTelComponentName returns an attribute KeyValue conforming to the +// "otel.component.name" semantic conventions. It represents a name uniquely +// identifying the instance of the OpenTelemetry component within its containing +// SDK instance. +func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeSchemaURL returns an attribute KeyValue conforming to the +// "otel.scope.schema_url" semantic conventions. It represents the schema URL of +// the instrumentation scope. +func OTelScopeSchemaURL(val string) attribute.KeyValue { + return OTelScopeSchemaURLKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. 
+func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // Zipkin span exporter over HTTP + // + // Stability: development + OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") + // OTLP metric exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") + // Prometheus metric exporter over HTTP with the default text-based format + // + // Stability: development + OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") +) + +// Enum values for otel.span.parent.origin +var ( + // The span does not have a parent, it is a root span + // Stability: development + OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") + // The span has a parent and the parent's span context [isRemote()] is false + // Stability: development + // + // 
[isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") + // The span has a parent and the parent's span context [isRemote()] is true + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") +) + +// Enum values for otel.span.sampling_result +var ( + // The span is not sampled and not recording + // Stability: development + OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") + // The span is not sampled, but recording + // Stability: development + OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") + // The span is sampled and recording + // Stability: development + OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") +) + +// Enum values for otel.status_code +var ( + // The operation has been validated by an Application developer or Operator to + // have completed successfully. + // Stability: stable + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error. + // Stability: stable + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// Namespace: peer +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic + // conventions. It represents the [`service.name`] of the remote service. SHOULD + // be equal to the actual `service.name` resource attribute of the remote + // service if any. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: AuthTokenCache + // Note: Examples of `peer.service` that users may specify: + // + // - A Redis cache of auth tokens as `peer.service="AuthTokenCache"`. + // - A gRPC service `rpc.service="io.opentelemetry.AuthService"` may be hosted + // in both a gateway, `peer.service="ExternalApiService"` and a backend, + // `peer.service="AuthService"`. + // + // + // [`service.name`]: /docs/resource/README.md#service + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the "peer.service" +// semantic conventions. It represents the [`service.name`] of the remote +// service. SHOULD be equal to the actual `service.name` resource attribute of +// the remote service if any. +// +// [`service.name`]: /docs/resource/README.md#service +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// Namespace: pprof +const ( + // PprofLocationIsFoldedKey is the attribute Key conforming to the + // "pprof.location.is_folded" semantic conventions. It provides + // an indication that multiple symbols map to this location's address, for + // example due to identical code folding by the linker. In that case the line + // information represents one of the multiple symbols. This field must be + // recomputed when the symbolization state of the profile changes. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + PprofLocationIsFoldedKey = attribute.Key("pprof.location.is_folded") +
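+ // Illustrative usage sketch (not part of the generated conventions): a
+ // fully symbolized mapping could be described with the boolean pprof
+ // attributes in this namespace via the helper functions defined below. The
+ // attrs variable is an assumption for the example.
+ //
+ //	attrs := []attribute.KeyValue{
+ //		PprofLocationIsFolded(false),
+ //		PprofMappingHasFilenames(true),
+ //		PprofMappingHasFunctions(true),
+ //		PprofMappingHasInlineFrames(true),
+ //		PprofMappingHasLineNumbers(true),
+ //	}
+
+ // PprofMappingHasFilenamesKey is the attribute Key conforming to the + // "pprof.mapping.has_filenames" semantic conventions. It + // indicates that there are filenames related to this mapping.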
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + PprofMappingHasFilenamesKey = attribute.Key("pprof.mapping.has_filenames") + + // PprofMappingHasFunctionsKey is the attribute Key conforming to the + // "pprof.mapping.has_functions" semantic conventions. It + // indicates that there are functions related to this mapping. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + PprofMappingHasFunctionsKey = attribute.Key("pprof.mapping.has_functions") + + // PprofMappingHasInlineFramesKey is the attribute Key conforming to the + // "pprof.mapping.has_inline_frames" semantic conventions. It + // indicates that there are inline frames related to this mapping. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + PprofMappingHasInlineFramesKey = attribute.Key("pprof.mapping.has_inline_frames") + + // PprofMappingHasLineNumbersKey is the attribute Key conforming to the + // "pprof.mapping.has_line_numbers" semantic conventions. It + // indicates that there are line numbers related to this mapping. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + PprofMappingHasLineNumbersKey = attribute.Key("pprof.mapping.has_line_numbers") + + // PprofProfileCommentKey is the attribute Key conforming to the + // "pprof.profile.comment" semantic conventions. It represents the free-form + // text associated with the profile. This field should not be used to store any + // machine-readable information, it is only for human-friendly content. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "hello world", "bazinga" + PprofProfileCommentKey = attribute.Key("pprof.profile.comment") +) + +// PprofLocationIsFolded returns an attribute KeyValue conforming to the +// "pprof.location.is_folded" semantic conventions. It provides an +// indication that multiple symbols map to this location's address, for example +// due to identical code folding by the linker. In that case the line information +// represents one of the multiple symbols. This field must be recomputed when the +// symbolization state of the profile changes. +func PprofLocationIsFolded(val bool) attribute.KeyValue { + return PprofLocationIsFoldedKey.Bool(val) +} + +// PprofMappingHasFilenames returns an attribute KeyValue conforming to the +// "pprof.mapping.has_filenames" semantic conventions. It +// indicates that there are filenames related to this mapping. +func PprofMappingHasFilenames(val bool) attribute.KeyValue { + return PprofMappingHasFilenamesKey.Bool(val) +} + +// PprofMappingHasFunctions returns an attribute KeyValue conforming to the +// "pprof.mapping.has_functions" semantic conventions. It +// indicates that there are functions related to this mapping. +func PprofMappingHasFunctions(val bool) attribute.KeyValue { + return PprofMappingHasFunctionsKey.Bool(val) +} + +// PprofMappingHasInlineFrames returns an attribute KeyValue conforming to the +// "pprof.mapping.has_inline_frames" semantic conventions. It +// indicates that there are inline frames related to this mapping.
+func PprofMappingHasInlineFrames(val bool) attribute.KeyValue { + return PprofMappingHasInlineFramesKey.Bool(val) +} + +// PprofMappingHasLineNumbers returns an attribute KeyValue conforming to the +// "pprof.mapping.has_line_numbers" semantic conventions. It +// indicates that there are line numbers related to this mapping. +func PprofMappingHasLineNumbers(val bool) attribute.KeyValue { + return PprofMappingHasLineNumbersKey.Bool(val) +} + +// PprofProfileComment returns an attribute KeyValue conforming to the +// "pprof.profile.comment" semantic conventions. It represents the free-form text +// associated with the profile. This field should not be used to store any +// machine-readable information, it is only for human-friendly content. +func PprofProfileComment(val ...string) attribute.KeyValue { + return PprofProfileCommentKey.StringSlice(val) +} + +// Namespace: process +const ( + // ProcessArgsCountKey is the attribute Key conforming to the + // "process.args_count" semantic conventions. It represents the length of the + // process.command_args array. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 4 + // Note: This field can be useful for querying or performing bucket analysis on + // how many arguments were provided to start a process. More arguments may be an + // indication of suspicious activity. + ProcessArgsCountKey = attribute.Key("process.args_count") + + // ProcessCommandKey is the attribute Key conforming to the "process.command" + // semantic conventions. It represents the command used to launch the process + // (i.e. the command name). On Linux based systems, can be set to the zeroth + // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter + // extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otelcol" + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received by + // the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this + // would be the full argv vector passed to `main`. SHOULD NOT be collected by + // default unless there is sanitization that excludes sensitive data. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otecol", "--config=config.yaml" + ProcessCommandArgsKey = attribute.Key("process.command_args") +
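+ // Illustrative sketch (not part of the generated conventions): populating
+ // process.command_args and process.args_count from os.Args. Whether the
+ // arguments are safe to record is application-specific; the sanitizeArgs
+ // helper is hypothetical and stands in for whatever redaction the caller
+ // applies.
+ //
+ //	args := sanitizeArgs(os.Args) // hypothetical redaction step
+ //	span.SetAttributes(
+ //		ProcessCommandArgs(args...),
+ //		ProcessArgsCount(len(args)),
+ //	)
+
+ // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full command + // used to launch the process as a single string. + // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if + // you have to assemble it just for monitoring; use `process.command_args` + // instead. SHOULD NOT be collected by default unless there is sanitization that + // excludes sensitive data.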
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch.type" semantic conventions. It specifies + // whether the context switches for this data point were voluntary or + // involuntary. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch.type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and time + // the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:25:34.853Z" + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the + // "process.executable.build_id.gnu" semantic conventions. It represents the GNU + // build ID as found in the `.note.gnu.build-id` ELF section (hex string). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" + ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") + + // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the + // "process.executable.build_id.go" semantic conventions. It represents the Go + // build ID as retrieved by `go tool buildid <go executable>`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" + ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") + + // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the + // "process.executable.build_id.htlhash" semantic conventions. It represents the + // profiling specific build ID for executables. See the OTel specification for + // Profiles for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "600DCAFE4A110000F2BF38C493F5FB92" + ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name of the + // process executable. On Linux based systems, this SHOULD be set to the base + // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to + // the base name of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcol" + ProcessExecutableNameKey = attribute.Key("process.executable.name") +
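+ // Illustrative sketch (not part of the generated conventions): deriving
+ // process.executable.name and process.executable.path from os.Executable.
+ // Error handling is elided for brevity.
+ //
+ //	path, _ := os.Executable()
+ //	span.SetAttributes(
+ //		ProcessExecutablePathKey.String(path),
+ //		ProcessExecutableName(filepath.Base(path)),
+ //	)
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full path + // to the process executable. On Linux based systems, can be set to the target + // of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`.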
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/cmd/otelcol" + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" + // semantic conventions. It represents the exit code of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" + // semantic conventions. It represents the date and time the process exited, in + // ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:26:12.315Z" + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID of the + // process's group leader. This is also the process group ID (PGID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents whether the + // process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessLinuxCgroupKey is the attribute Key conforming to the + // "process.linux.cgroup" semantic conventions. It represents the control group + // associated with the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", + // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" + // Note: Control groups (cgroups) are a kernel feature used to organize and + // manage process resources. This attribute provides the path(s) to the + // cgroup(s) associated with the process, which should match the contents of the + // [/proc/[PID]/cgroup] file. + // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") +
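+ // Illustrative sketch (not part of the generated conventions): reading the
+ // process.linux.cgroup value from procfs, matching the note above. Error
+ // handling is elided for brevity.
+ //
+ //	data, _ := os.ReadFile("/proc/self/cgroup")
+ //	span.SetAttributes(
+ //		ProcessLinuxCgroupKey.String(strings.TrimSpace(string(data))),
+ //	)
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID).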
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14.0.2 + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved user ID + // (SUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the username of + // the saved user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID of + // the process's session leader. This is also the session ID (SID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessStateKey is the attribute Key conforming to the "process.state" + // semantic conventions. It represents the process state, e.g., + // [Linux Process State Codes]. 
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "running"
+	//
+	// [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+	ProcessStateKey = attribute.Key("process.state")
+
+	// ProcessTitleKey is the attribute Key conforming to the "process.title"
+	// semantic conventions. It represents the process title (proctitle).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "cat /etc/hostname", "xfce4-session", "bash"
+	// Note: In many Unix-like systems, the process title (proctitle) is the string
+	// that represents the name or command line of a running process, displayed by
+	// system monitoring tools like ps, top, and htop.
+	ProcessTitleKey = attribute.Key("process.title")
+
+	// ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+	// semantic conventions. It represents the effective user ID (EUID) of the
+	// process.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+	// semantic conventions. It represents the username of the effective user of the
+	// process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "root"
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+	// conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily unique
+	// across all processes on the host but it is unique within the process
+	// namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+
+	// ProcessWorkingDirectoryKey is the attribute Key conforming to the
+	// "process.working_directory" semantic conventions. It represents the working
+	// directory of the process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/root"
+	ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+	return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the command
+// arguments (including the command/executable itself) as received by the
+// process.
+// On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the
+// "process.environment_variable" semantic conventions. It represents the process
+// environment variables, `<key>` being the environment variable name, the value
+// being the environment variable value.
+func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue {
+	return attribute.String("process.environment_variable."+key, val)
+}
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+	return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go binary>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+	return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+	return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
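+
+// exampleProcessEnvAttrs is an editor's illustrative sketch, not part of the
+// generated conventions: it shows how the dynamic-key helper above composes
+// the attribute name ("process.environment_variable." + key), and why callers
+// would typically pass an explicit allow-list of variable names rather than
+// the whole environment, given the sanitization caveats on the command-line
+// attributes. The allow-list itself is a hypothetical choice of this sketch.
+func exampleProcessEnvAttrs(env map[string]string, allowed []string) []attribute.KeyValue {
+	attrs := make([]attribute.KeyValue, 0, len(allowed))
+	for _, k := range allowed {
+		if v, ok := env[k]; ok {
+			// Each variable becomes its own attribute, e.g.
+			// "process.environment_variable.PATH".
+			attrs = append(attrs, ProcessEnvironmentVariable(k, v))
+		}
+	}
+	return attrs
+}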
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+	return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+	return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+	return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+	return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+	return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+	return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+	return ProcessRealUserNameKey.String(val)
+}
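+
+// exampleProcessAttrs is an editor's illustrative sketch, not part of the
+// generated conventions: it gathers a few of the process attributes above
+// using only the Go standard library (assumes an `os` import, which this
+// package does not otherwise need). Errors are ignored for brevity; real
+// instrumentation would be platform-aware and handle them.
+func exampleProcessAttrs() []attribute.KeyValue {
+	exe, _ := os.Executable() // full path of the running binary
+	wd, _ := os.Getwd()       // current working directory
+	return []attribute.KeyValue{
+		ProcessPID(os.Getpid()),
+		ProcessParentPID(os.Getppid()),
+		ProcessExecutablePath(exe),
+		ProcessWorkingDirectory(wd),
+	}
+}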
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user ID
+// (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+	return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username of
+// the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+	return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+	return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessTitle returns an attribute KeyValue conforming to the "process.title"
+// semantic conventions. It represents the process title (proctitle).
+func ProcessTitle(val string) attribute.KeyValue {
+	return ProcessTitleKey.String(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+	return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+	return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid"
+// semantic conventions. It represents the virtual process identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+	return ProcessVpidKey.Int(val)
+}
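+
+// exampleProcessCgroup is an editor's illustrative sketch, not part of the
+// generated conventions: per the note on ProcessLinuxCgroupKey, the value can
+// be read verbatim from /proc/self/cgroup on Linux (assumes `os` and
+// `strings` imports, which this package does not otherwise need).
+func exampleProcessCgroup() (attribute.KeyValue, error) {
+	raw, err := os.ReadFile("/proc/self/cgroup")
+	if err != nil {
+		return attribute.KeyValue{}, err
+	}
+	return ProcessLinuxCgroup(strings.TrimSpace(string(raw))), nil
+}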
+// ProcessWorkingDirectory returns an attribute KeyValue conforming to the
+// "process.working_directory" semantic conventions. It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+	return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch.type
+var (
+	// voluntary
+	// Stability: development
+	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+	// involuntary
+	// Stability: development
+	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.state
+var (
+	// running
+	// Stability: development
+	ProcessStateRunning = ProcessStateKey.String("running")
+	// sleeping
+	// Stability: development
+	ProcessStateSleeping = ProcessStateKey.String("sleeping")
+	// stopped
+	// Stability: development
+	ProcessStateStopped = ProcessStateKey.String("stopped")
+	// defunct
+	// Stability: development
+	ProcessStateDefunct = ProcessStateKey.String("defunct")
+)
+
+// Namespace: profile
+const (
+	// ProfileFrameTypeKey is the attribute Key conforming to the
+	// "profile.frame.type" semantic conventions. It describes the
+	// interpreter or compiler of a single frame.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "cpython"
+	ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+	// [.NET]
+	//
+	// Stability: development
+	//
+	// [.NET]: https://wikipedia.org/wiki/.NET
+	ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+	// [JVM]
+	//
+	// Stability: development
+	//
+	// [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+	ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+	// [Kernel]
+	//
+	// Stability: development
+	//
+	// [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+	ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+	// Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+	// more precise value MUST be used.
+	//
+	// Stability: development
+	//
+	// [C]: https://wikipedia.org/wiki/C_(programming_language)
+	// [C++]: https://wikipedia.org/wiki/C%2B%2B
+	// [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+	// [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+	ProfileFrameTypeNative = ProfileFrameTypeKey.String("native")
+	// [Perl]
+	//
+	// Stability: development
+	//
+	// [Perl]: https://wikipedia.org/wiki/Perl
+	ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl")
+	// [PHP]
+	//
+	// Stability: development
+	//
+	// [PHP]: https://wikipedia.org/wiki/PHP
+	ProfileFrameTypePHP = ProfileFrameTypeKey.String("php")
+	// [Python]
+	//
+	// Stability: development
+	//
+	// [Python]: https://wikipedia.org/wiki/Python_(programming_language)
+	ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython")
+	// [Ruby]
+	//
+	// Stability: development
+	//
+	// [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language)
+	ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby")
+	// [V8JS]
+	//
+	// Stability: development
+	//
+	// [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine)
+	ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js")
+	// [Erlang]
+	//
+	// Stability: development
+	//
+	// [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)
+	ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam")
+	// [Go]
+	//
+	// Stability: development
+	//
+	// [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+	ProfileFrameTypeGo = ProfileFrameTypeKey.String("go")
+	// [Rust]
+	//
+	// Stability: development
+	//
+	// [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+	ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust")
+)
+
+// Namespace: rpc
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes] of the Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [error codes]: https://connectrpc.com/docs/protocol/#error-codes
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the
+	// [numeric status code] of the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+	// RPCJSONRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+	// property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: -32700, 100
+	RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJSONRPCErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Parse error", "User already exists"
+	RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+	// RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of request or response. Since protocol allows id to be int, string,
+	// `null` or missing (for notifications), value is expected to be cast to string
+	// for simplicity. Use empty string in case of `null` value. Omit entirely if
+	// this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "10", "request-7", ""
+	RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJSONRPCVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+	// doesn't specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2.0", "1.0"
+	RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCMessageCompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+	// RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+	// conventions. It represents the logical name of the method from
+	// the RPC interface perspective.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: exampleMethod
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+	// conventions. It represents the full (logical) name of the service being
+	// called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myservice.EchoService
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+	// conventions. It represents a string identifying the remoting system. See
+	// below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the
+// connect request metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val)
+}
+
+// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the
+// connect response metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val)
+}
+
+// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC
+// request metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.grpc.request.metadata."+key, val)
+}
+
+// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC
+// response metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.grpc.response.metadata."+key, val)
+}
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+	return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+	return RPCJSONRPCErrorMessageKey.String(val)
+}
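+
+// exampleMessageIDCounters is an editor's illustrative sketch, not part of the
+// generated conventions: rpc.message.id is specified as two independent
+// counters starting at 1, one per direction, so streaming instrumentation
+// would typically keep both alongside its per-stream state.
+type exampleMessageIDCounters struct {
+	sent, received int
+}
+
+// next returns the rpc.message.id attribute for the next message in the given
+// direction (sent == true for outgoing messages).
+func (c *exampleMessageIDCounters) next(sent bool) attribute.KeyValue {
+	if sent {
+		c.sent++
+		return RPCMessageID(c.sent)
+	}
+	c.received++
+	return RPCMessageID(c.received)
+}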
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of request or response. Since protocol allows id to be int, string, `null` or
+// missing (for notifications), value is expected to be cast to string for
+// simplicity. Use empty string in case of `null` value. Omit entirely if this is
+// a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+	return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't
+// specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+	return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+	return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`, one for sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+	return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+	return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the logical name of the method
+// from the RPC interface perspective.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
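+
+// exampleGRPCStatusAttr is an editor's illustrative sketch, not part of the
+// generated conventions: rpc.grpc.status_code carries the numeric gRPC code,
+// so instrumentation can derive it directly from a codes.Code value (assumes
+// a google.golang.org/grpc/codes import, a dependency of this sketch only).
+func exampleGRPCStatusAttr(code codes.Code) attribute.KeyValue {
+	// codes.Code is a uint32 holding the wire-level status number, which is
+	// exactly what the enum variants below (e.g. RPCGRPCStatusCodeOk) encode.
+	return RPCGRPCStatusCodeKey.Int(int(code))
+}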
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
+
+// Enum values for rpc.connect_rpc.error_code
+var (
+	// cancelled
+	// Stability: development
+	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+	// unknown
+	// Stability: development
+	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+	// invalid_argument
+	// Stability: development
+	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+	// deadline_exceeded
+	// Stability: development
+	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+	// not_found
+	// Stability: development
+	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+	// already_exists
+	// Stability: development
+	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+	// permission_denied
+	// Stability: development
+	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+	// resource_exhausted
+	// Stability: development
+	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+	// failed_precondition
+	// Stability: development
+	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+	// aborted
+	// Stability: development
+	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+	// out_of_range
+	// Stability: development
+	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+	// unimplemented
+	// Stability: development
+	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+	// internal
+	// Stability: development
+	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+	// unavailable
+	// Stability: development
+	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+	// data_loss
+	// Stability: development
+	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+	// unauthenticated
+	// Stability: development
+	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+// Enum values for rpc.grpc.status_code
+var (
+	// OK
+	// Stability: development
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	// Stability: development
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	// Stability: development
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	// Stability: development
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	// Stability: development
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	// Stability: development
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	// Stability: development
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	// Stability: development
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	// Stability: development
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	// Stability: development
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	// Stability: development
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// 
OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + // Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") + // [ONC RPC (Sun RPC)] + // Stability: development + // + // [ONC RPC (Sun RPC)]: https://datatracker.ietf.org/doc/html/rfc5531 + RPCSystemOncRPC = RPCSystemKey.String("onc_rpc") + // JSON-RPC + // Stability: development + RPCSystemJSONRPC = RPCSystemKey.String("jsonrpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. 
It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. +func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. 
+func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). 
+	// The ID helps to distinguish instances of the same service that exist at the
+	// same time (e.g. instances of a horizontally scaled service).
+	//
+	// Implementations, such as SDKs, are recommended to generate a random Version 1
+	// or Version 4 [RFC 4122] UUID, but are free to use an inherent unique ID as
+	// the source of this value if stability is desirable. In that case, the ID
+	// SHOULD be used as source of a UUID Version 5 and SHOULD use the following
+	// UUID as the namespace: `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+	//
+	// UUIDs are typically recommended, as only an opaque value for the purposes of
+	// identifying a service instance is needed. Similar to what can be seen in the
+	// man page for the [`/etc/machine-id`] file, the underlying data, such as pod
+	// name and namespace, should be treated as confidential, being the user's
+	// choice to expose it or not via another resource attribute.
+	//
+	// For applications running behind an application server (like unicorn), we do
+	// not recommend using one identifier for all processes participating in the
+	// application. Instead, it's recommended that each division (e.g. a worker
+	// thread in unicorn) have its own instance.id.
+	//
+	// It's not recommended for a Collector to set `service.instance.id` if it
+	// can't unambiguously determine the service instance that is generating that
+	// telemetry. For instance, creating a UUID based on `pod.name` will likely be
+	// wrong, as the Collector might not know from which container within that pod
+	// the telemetry originated. However, Collectors can set the
+	// `service.instance.id` if they can unambiguously determine the service
+	// instance for that telemetry. This is typically the case for scraping
+	// receivers, as they know the target address and port.
+	//
+	// [RFC 4122]: https://www.ietf.org/rfc/rfc4122.txt
+	// [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+	// conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "shoppingcart"
+	// Note: MUST be the same for all instances of horizontally scaled services. If
+	// the value was not specified, SDKs MUST fallback to `unknown_service:`
+	// concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+	// If `process.executable.name` is not available, the value MUST be set to
+	// `unknown_service`.
+	//
+	// [`process.executable.name`]: process.md
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Shop"
+	// Note: A string value having a meaning that helps to distinguish a group of
+	// services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name` is
+	// expected to be unique for all services that have no explicit namespace
+	// defined (so the empty/unspecified namespace is simply one more valid
+	// namespace).
Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// Namespace: signalr +const ( + // SignalRConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the signalR + // HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "app_shutdown", "timeout" + SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalRTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the + // [SignalR transport type]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "web_sockets", "long_polling" + // + // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md + SignalRTransportKey = attribute.Key("signalr.transport") +) + +// Enum values for signalr.connection.status +var ( + // The connection was closed normally. + // Stability: stable + SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout. + // Stability: stable + SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down. + // Stability: stable + SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") +) + +// Enum values for signalr.transport +var ( + // ServerSentEvents protocol + // Stability: stable + SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") + // LongPolling protocol + // Stability: stable + SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") + // WebSockets protocol + // Stability: stable + SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") +) + +// Namespace: source +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the destination side, and when communicating through + // an intermediary, `source.address` SHOULD represent the source address behind + // any intermediaries, for example proxies, if it's available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" semantic + // conventions. It represents the source port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the "source.address" +// semantic conventions. It represents the source address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number. +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Namespace: system +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "(identifier)" + SystemDeviceKey = attribute.Key("system.device") + + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the filesystem + // mode. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "rw, ro"
+	SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+	// SystemFilesystemMountpointKey is the attribute Key conforming to the
+	// "system.filesystem.mountpoint" semantic conventions. It represents the
+	// filesystem mount path.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/mnt/data"
+	SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+	// SystemFilesystemStateKey is the attribute Key conforming to the
+	// "system.filesystem.state" semantic conventions. It represents the filesystem
+	// state.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "used"
+	SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+	// SystemFilesystemTypeKey is the attribute Key conforming to the
+	// "system.filesystem.type" semantic conventions. It represents the filesystem
+	// type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "ext4"
+	SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+	// SystemMemoryStateKey is the attribute Key conforming to the
+	// "system.memory.state" semantic conventions. It represents the memory state.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "free", "cached"
+	SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+	// SystemPagingDirectionKey is the attribute Key conforming to the
+	// "system.paging.direction" semantic conventions. It represents the paging
+	// access direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "in"
+	SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+	// SystemPagingFaultTypeKey is the attribute Key conforming to the
+	// "system.paging.fault.type" semantic conventions. It represents the paging
+	// fault type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "minor"
+	SystemPagingFaultTypeKey = attribute.Key("system.paging.fault.type")
+
+	// SystemPagingStateKey is the attribute Key conforming to the
+	// "system.paging.state" semantic conventions. It represents the memory paging
+	// state.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "free"
+	SystemPagingStateKey = attribute.Key("system.paging.state")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+	return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+	return SystemFilesystemModeKey.String(val)
+}
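+
+// exampleMemoryPoint is an editor's illustrative sketch, not part of the
+// generated conventions: system.memory.state is an enum attribute, so a
+// memory-usage metric typically reports one data point per state using the
+// variants defined below (used, free, buffers, cached). The pairing shown
+// here mirrors what an asynchronous gauge callback would observe.
+type exampleMemoryPoint struct {
+	State attribute.KeyValue
+	Bytes int64
+}
+
+func exampleMemoryPoints(used, free, cached int64) []exampleMemoryPoint {
+	return []exampleMemoryPoint{
+		{SystemMemoryStateUsed, used},
+		{SystemMemoryStateFree, free},
+		{SystemMemoryStateCached, cached},
+	}
+}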
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+	return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+	// used
+	// Stability: development
+	SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+	// free
+	// Stability: development
+	SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+	// reserved
+	// Stability: development
+	SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+	// fat32
+	// Stability: development
+	SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+	// exfat
+	// Stability: development
+	SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+	// ntfs
+	// Stability: development
+	SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+	// refs
+	// Stability: development
+	SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+	// hfsplus
+	// Stability: development
+	SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+	// ext4
+	// Stability: development
+	SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+	// Actual used virtual memory in bytes.
+	// Stability: development
+	SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+	// free
+	// Stability: development
+	SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+	// buffers
+	// Stability: development
+	SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+	// cached
+	// Stability: development
+	SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Enum values for system.paging.direction
+var (
+	// in
+	// Stability: development
+	SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+	// out
+	// Stability: development
+	SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+// Enum values for system.paging.fault.type
+var (
+	// major
+	// Stability: development
+	SystemPagingFaultTypeMajor = SystemPagingFaultTypeKey.String("major")
+	// minor
+	// Stability: development
+	SystemPagingFaultTypeMinor = SystemPagingFaultTypeKey.String("minor")
+)
+
+// Enum values for system.paging.state
+var (
+	// used
+	// Stability: development
+	SystemPagingStateUsed = SystemPagingStateKey.String("used")
+	// free
+	// Stability: development
+	SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+// Namespace: telemetry
+const (
+	// TelemetryDistroNameKey is the attribute Key conforming to the
+	// "telemetry.distro.name" semantic conventions. It represents the name of the
+	// auto instrumentation agent or distribution, if used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "parts-unlimited-java"
+	// Note: Official auto instrumentation agents and distributions SHOULD set the
+	// `telemetry.distro.name` attribute to
+	// a string starting with `opentelemetry-`, e.g.
+	// `opentelemetry-java-instrumentation`.
+	TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+	// TelemetryDistroVersionKey is the attribute Key conforming to the
+	// "telemetry.distro.version" semantic conventions. It represents the version
+	// string of the auto instrumentation agent or distribution, if used.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. 
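+// Taken together, the three stable SDK attributes above are typically emitted
+// as resource attributes; a minimal sketch using this package's helpers (the
+// surrounding resource construction is assumed):
+//
+//	attrs := []attribute.KeyValue{
+//		TelemetrySDKName("opentelemetry"),
+//		TelemetrySDKLanguageGo,
+//		TelemetrySDKVersion("1.2.3"),
+//	}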
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. It represents the fully qualified human readable name + // of the [test case]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a [test suite] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. 
+// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: + // Examples of where the value can be extracted from: + // + // | Language or platform | Source | + // | --- | --- | + // | JVM | `Thread.currentThread().threadId()` | + // | .NET | `Thread.CurrentThread.ManagedThreadId` | + // | Python | `threading.current_thread().ident` | + // | Ruby | `Thread.current.object_id` | + // | C++ | `std::this_thread::get_id()` | + // | Erlang | `erlang:self()` | + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic + // conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: main + // Note: + // Examples of where the value can be extracted from: + // + // | Language or platform | Source | + // | --- | --- | + // | JVM | `Thread.currentThread().getName()` | + // | .NET | `Thread.CurrentThread.Name` | + // | Python | `threading.current_thread().name` | + // | Ruby | `Thread.current.name` | + // | Erlang | `erlang:process_info(self(), registered_name)` | + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic +// conventions. It represents the current "managed" thread ID (as opposed to OS +// thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Namespace: tls +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic + // conventions. 
It represents the string indicating the [cipher] used during the + // current connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` + // of the [registered TLS Cipher Suites]. + // + // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 + // [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the client. This is usually + // mutually-exclusive of `client.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // client. This is usually mutually-exclusive of `client.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/time + // indicating when the client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/time + // indicating when the client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel.
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // server. This is usually mutually-exclusive of `server.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the server. 
For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" + // semantic conventions. It represents a hash that identifies servers based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/time + // indicating when the server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/time + // indicating when the server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. 
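+// Several attributes in this namespace can plausibly be derived from a
+// crypto/tls ConnectionState; a hedged sketch (conn is an assumed *tls.Conn,
+// and this mapping is illustrative rather than mandated by this package):
+//
+//	state := conn.ConnectionState()
+//	attrs := []attribute.KeyValue{
+//		TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
+//		TLSNextProtocol(state.NegotiatedProtocol),
+//		TLSResumed(state.DidResume),
+//		TLSEstablished(state.HandshakeComplete),
+//	}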
+func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/time +// indicating when the client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/time +// indicating when the client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part of +// the version parsed from the original string of the negotiated +// [SSL/TLS protocol version]. +// +// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the server.
This is usually +// mutually-exclusive of `server.certificate_chain` since this value also exists +// in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// server. This is usually mutually-exclusive of `server.certificate` since that +// value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the server. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the server. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/time +// indicating when the server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/time +// indicating when the server certificate is first considered valid.
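+// The server certificate validity attributes above can be populated from the
+// peer's leaf x509 certificate; a sketch assuming a non-empty
+// state.PeerCertificates from crypto/tls, with RFC 3339 timestamps via the
+// time package:
+//
+//	leaf := state.PeerCertificates[0]
+//	attrs := []attribute.KeyValue{
+//		TLSServerNotBefore(leaf.NotBefore.UTC().Format(time.RFC3339)),
+//		TLSServerNotAfter(leaf.NotAfter.UTC().Format(time.RFC3339)),
+//	}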
+func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. 
+ // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI query] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "q=OpenTelemetry" + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `q=OpenTelemetry&sig=REDACTED`. 
+ // + // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered URL domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "foo.co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // For example, the registered domain for `foo.example.com` is `example.com`. + // Trying to approximate this by simply taking the last two labels will not work + // well for TLDs such as `co.uk`. + // + // [public suffix list]: https://publicsuffix.org/ + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic + // conventions. It represents the [URI scheme] component identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https", "ftp", "telnet" + // + // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name, which includes all of the names except the host name + // under the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain contains + // all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "east", "sub2.sub1" + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the + // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the + // subdomain field should contain `sub2.sub1`, with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" semantic + // conventions. It represents the low-cardinality template of an + // [absolute path reference]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/users/{id}", "/users/:id", "/users?id={id}" + // + // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective top + // level domain (eTLD), also known as the domain suffix, which is the last part + // of the domain name. For example, the top level domain for example.com is `com`.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com", "co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // + // [public suffix list]: https://publicsuffix.org/ + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the `url.full`, +// such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the "url.extension" +// semantic conventions. It represents the file extension extracted from the +// `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the "url.fragment" +// semantic conventions. It represents the [URI fragment] component. +// +// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" semantic +// conventions. It represents the absolute URL describing a network resource +// according to [RFC3986]. +// +// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the "url.original" +// semantic conventions. It represents the unmodified original URL as seen in the +// event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" semantic +// conventions. It represents the [URI path] component. +// +// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" semantic +// conventions. It represents the port extracted from the `url.full`. +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic +// conventions. It represents the [URI query] component. +// +// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered URL domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI scheme] component identifying the +// used protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" +// semantic conventions. It represents the subdomain portion of a fully qualified +// domain name, which includes all of the names except the host name under the +// registered_domain.
In a partially qualified domain, or if the qualification +// level of the full name cannot be determined, subdomain contains all of the +// names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the "url.template" +// semantic conventions. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, which is the last part +// of the domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Namespace: user +const ( + // UserEmailKey is the attribute Key conforming to the "user.email" semantic + // conventions. It represents the user email address. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein@example.com" + UserEmailKey = attribute.Key("user.email") + + // UserFullNameKey is the attribute Key conforming to the "user.full_name" + // semantic conventions. It represents the user's full name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Albert Einstein" + UserFullNameKey = attribute.Key("user.full_name") + + // UserHashKey is the attribute Key conforming to the "user.hash" semantic + // conventions. It represents the unique user hash to correlate information for + // a user in anonymized form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" + // Note: Useful if `user.id` or `user.name` contain confidential information and + // cannot be used. + UserHashKey = attribute.Key("user.hash") + + // UserIDKey is the attribute Key conforming to the "user.id" semantic + // conventions. It represents the unique identifier of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions.
It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. +func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 + // grpc-java-okhttp/1.27.2" + // + // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentOSNameKey is the attribute Key conforming to the + // "user_agent.os.name" semantic conventions. It represents the human readable + // operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + // Note: For mapping user agent strings to OS names, libraries such as + // [ua-parser] can be utilized. + // + // [ua-parser]: https://github.com/ua-parser + UserAgentOSNameKey = attribute.Key("user_agent.os.name") + + // UserAgentOSVersionKey is the attribute Key conforming to the + // "user_agent.os.version" semantic conventions. It represents the version + // string of the operating system as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // Note: For mapping user agent strings to OS versions, libraries such as + // [ua-parser] can be utilized. + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + // [ua-parser]: https://github.com/ua-parser + UserAgentOSVersionKey = attribute.Key("user_agent.os.version") + + // UserAgentSyntheticTypeKey is the attribute Key conforming to the + // "user_agent.synthetic.type" semantic conventions. It represents the category + // of synthetic traffic, such as tests or bots. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute MAY be derived from the contents of the + // `user_agent.original` attribute. Components that populate the attribute are + // responsible for determining what they consider to be synthetic bot or test + // traffic. This attribute can either be set for self-identification purposes, + // or on telemetry detected to be generated as a result of a synthetic request. + // This attribute is useful for distinguishing between genuine client traffic + // and synthetic traffic generated by bots or tests. + UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of the + // user-agent extracted from original. Usually refers to the browser's version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.1.2", "1.0.0" + // Note: [Example] of extracting browser's version from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant version SHOULD be selected. In such a scenario it should align + // with `user_agent.name` + // + // [Example]: https://www.whatsmyua.info + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. It represents the version string +// of the operating system as defined in [Version Attributes].
+// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). This title is often a brief summary + // of the change and may get merged into a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "added", "removed" + VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") + + // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" + // semantic conventions. It represents the group owner within the version + // control system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org", "myteam", "business-unit" + VCSOwnerNameKey = attribute.Key("vcs.owner.name") + + // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" + // semantic conventions. It represents the name of the version control system + // provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "github", "gitlab", "gitea", "bitbucket" + VCSProviderNameKey = attribute.Key("vcs.provider.name") + + // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" + // semantic conventions.
It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") + + // VCSRefBaseRevisionKey is the attribute Key conforming to the + // "vcs.ref.base.revision" semantic conventions. It represents the revision, + // literally [revised version]. The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. The + // revision can be a full [hash value (see + // glossary)] + // of the recorded change to a ref within a repository pointing to a + // [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.base.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") + + // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") + + // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") + + // VCSRefHeadRevisionKey is the attribute Key conforming to the + // "vcs.ref.head.revision" semantic conventions.
It represents the revision, + // literally [revised version]. The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `head` refers to where you are right now; the current reference at a + // given time. The revision can be a full [hash value (see + // glossary)] + // of the recorded change to a ref within a repository pointing to a + // [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.head.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") + + // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") + + // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic + // conventions. It represents the type of the [reference] in the repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851 + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged into a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version]. The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version]. The revision most often refers to a commit object +// in Git, or a revision number in SVN.
+// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851 +func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. + // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. 
+ // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" +// semantic conventions. It represents the name of the web engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the web +// engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Namespace: zos +const ( + // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic + // conventions. It represents the System Management Facility (SMF) Identifier + // that uniquely identifies a z/OS system within a SYSPLEX or mainframe + // environment and is used for system and performance analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYS1" + ZOSSmfIDKey = attribute.Key("zos.smf.id") + + // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name" + // semantic conventions. It represents the name of the SYSPLEX to which the z/OS + // system belongs. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYSPLEX1" + ZOSSysplexNameKey = attribute.Key("zos.sysplex.name") +) + +// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic +// conventions. It represents the System Management Facility (SMF) Identifier +// that uniquely identifies a z/OS system within a SYSPLEX or mainframe +// environment and is used for system and performance analysis. +func ZOSSmfID(val string) attribute.KeyValue { + return ZOSSmfIDKey.String(val) +} + +// ZOSSysplexName returns an attribute KeyValue conforming to the +// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX +// to which the z/OS system belongs. +func ZOSSysplexName(val string) attribute.KeyValue { + return ZOSSysplexNameKey.String(val) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/doc.go similarity index 80% rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.38.0/doc.go index d031bbea784..02a68c16760 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.26.0 +// patterns for OpenTelemetry things. This package represents the v1.38.0 // version of the OpenTelemetry semantic conventions.
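Before the smaller file renames that follow, a minimal sketch of how the regenerated v1.38.0 helpers above are typically consumed. This is illustrative only, not part of the vendored change: the tracer name, span name, repository URL, and attribute values are assumptions; only the semconv identifiers come from the generated file.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.38.0"
)

// recordBuildAttributes is a hypothetical function showing the call pattern.
func recordBuildAttributes(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "build")
	defer span.End()

	// Each generated helper returns an attribute.KeyValue keyed by its
	// convention name, e.g. VCSRefHeadName yields "vcs.ref.head.name".
	span.SetAttributes(
		semconv.VCSRepositoryURLFull("https://github.com/my-org/my-repo"), // assumed URL
		semconv.VCSRefHeadName("my-feature-branch"),
		semconv.VCSRefHeadRevision("9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc"),
		// Enum members such as this are predeclared KeyValue values, not functions.
		semconv.VCSRefHeadTypeBranch,
		semconv.UserAgentOriginal("YourApp/1.0.0 grpc-java-okhttp/1.27.2"),
	)
}

Note the split in the generated API: free-form string attributes get constructor functions, while enum-typed attributes (vcs.ref.head.type, user_agent.synthetic.type, and so on) get predeclared variables.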
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.38.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/error_type.go new file mode 100644 index 00000000000..84d909c41b3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/error_type.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.38.0" + +import ( + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +// +// If err is nil, the returned attribute has the default value +// [ErrorTypeOther]. +// +// If err's type has the method +// +// ErrorType() string +// +// then the returned attribute has the value of err.ErrorType(). Otherwise, the +// returned attribute has a value derived from the concrete type of err. +// +// The key of the returned attribute is [ErrorTypeKey]. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + + return ErrorTypeKey.String(errorType(err)) +} + +func errorType(err error) string { + var s string + if et, ok := err.(interface{ ErrorType() string }); ok { + // Prioritize the ErrorType method if available. + s = et.ErrorType() + } + if s == "" { + // Fallback to reflection if the ErrorType method is not supported or + // returns an empty value. + + t := reflect.TypeOf(err) + pkg, name := t.PkgPath(), t.Name() + if pkg != "" && name != "" { + s = pkg + "." + name + } else { + // The type has no package path or name (predeclared, not-defined, + // or alias for a not-defined type). + // + // This is not guaranteed to be unique, but is a best effort. + s = t.String() + } + } + return s +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/exception.go similarity index 74% rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.38.0/exception.go index bfaee0d56e3..f6187b9f960 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.38.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/schema.go similarity index 71% rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.38.0/schema.go index 4c87c7adcc7..51290d2dcee 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.38.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.38.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // a non-empty schema URL in the form https://opentelemetry.io/schemas/<version> -const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.38.0" diff --git a/vendor/gonum.org/v1/gonum/AUTHORS b/vendor/gonum.org/v1/gonum/AUTHORS index 1f0e79b4c0b..edb00ae61a8 100644 --- a/vendor/gonum.org/v1/gonum/AUTHORS +++ b/vendor/gonum.org/v1/gonum/AUTHORS @@ -8,7 +8,9 @@ # Please keep the list sorted. +Agnes Widera Alexander Egurnov +Andreas Scharf Andrei Blinnikov antichris Bailey Lissington @@ -94,8 +96,11 @@ Max Halford Maxim Sergeev Microsoft Corporation MinJae Kwon +Mohamed Ali Bouhaouala Nathan Edwards +Nathan Rooy Nick Potts +Nicolas Peugnet Nils Wogatzky Olivier Wulveryck Or Rikon @@ -132,6 +137,7 @@ Tristan Nicholls Valentin Deleplace Vincent Thiery Vladimír Chalupecký +Wang Fumin Will Tekulve Yasuhiro Matsumoto Yevgeniy Vahlis diff --git a/vendor/gonum.org/v1/gonum/CONTRIBUTORS b/vendor/gonum.org/v1/gonum/CONTRIBUTORS index 1fbe736c5f5..bf45623d406 100644 --- a/vendor/gonum.org/v1/gonum/CONTRIBUTORS +++ b/vendor/gonum.org/v1/gonum/CONTRIBUTORS @@ -15,7 +15,9 @@ # # Please keep the list sorted. +Agnes Widera Alexander Egurnov +Andreas Scharf Andrei Blinnikov Andrew Brampton antichris @@ -100,8 +102,11 @@ Matthieu Di Mercurio Max Halford Maxim Sergeev MinJae Kwon +Mohamed Ali Bouhaouala Nathan Edwards +Nathan Rooy Nick Potts +Nicolas Peugnet Nils Wogatzky Olivier Wulveryck Or Rikon @@ -135,6 +140,7 @@ Tristan Nicholls Valentin Deleplace Vincent Thiery Vladimír Chalupecký +Wang Fumin Will Tekulve Yasuhiro Matsumoto Yevgeniy Vahlis diff --git a/vendor/modules.txt b/vendor/modules.txt index c10d8a6d6f0..60f81500aa6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -414,7 +414,7 @@ github.com/eapache/go-xerial-snappy # github.com/eapache/queue v1.1.0 ## explicit github.com/eapache/queue -# github.com/ebitengine/purego v0.9.0 +# github.com/ebitengine/purego v0.9.1 ## explicit; go 1.18 github.com/ebitengine/purego github.com/ebitengine/purego/internal/cgo @@ -511,7 +511,7 @@ github.com/fatih/color # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/foxboron/go-tpm-keyfiles v0.0.0-20250903184740-5d135037bd4d +# github.com/foxboron/go-tpm-keyfiles v0.0.0-20251226215517-609e4778396f ## explicit; go 1.22.0 github.com/foxboron/go-tpm-keyfiles github.com/foxboron/go-tpm-keyfiles/template @@ -592,7 +592,7 @@ github.com/go-redis/redis/v8/internal/util # github.com/go-test/deep v1.1.1 ## explicit; go 1.16 github.com/go-test/deep -# github.com/go-viper/mapstructure/v2 v2.4.0 +# github.com/go-viper/mapstructure/v2 v2.5.0 ## explicit; go 1.18 github.com/go-viper/mapstructure/v2 github.com/go-viper/mapstructure/v2/internal/errors @@ -658,7 +658,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-tpm v0.9.6 +# github.com/google/go-tpm v0.9.8 ## explicit; go 1.22 github.com/google/go-tpm/legacy/tpm2 github.com/google/go-tpm/tpm2 @@ -814,7 +814,7 @@ github.com/hashicorp/go-sockaddr # github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid -# github.com/hashicorp/go-version v1.7.0 +# github.com/hashicorp/go-version v1.8.0 ## explicit github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v1.0.2 @@ -946,15 +946,15 @@ github.com/klauspost/crc32 ## explicit; go 1.12
github.com/knadh/koanf/maps github.com/knadh/koanf/providers/confmap -# github.com/knadh/koanf/v2 v2.3.0 +# github.com/knadh/koanf/v2 v2.3.2 ## explicit; go 1.23.0 github.com/knadh/koanf/v2 # github.com/kylelemons/godebug v1.1.0 ## explicit; go 1.11 github.com/kylelemons/godebug/diff github.com/kylelemons/godebug/pretty -# github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 -## explicit; go 1.16 +# github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 +## explicit; go 1.21 github.com/lufia/plan9stats # github.com/magefile/mage v1.15.0 ## explicit; go 1.12 @@ -1257,7 +1257,7 @@ github.com/pelletier/go-toml/v2/unstable # github.com/philhofer/fwd v1.2.0 ## explicit; go 1.20 github.com/philhofer/fwd -# github.com/pierrec/lz4/v4 v4.1.23 +# github.com/pierrec/lz4/v4 v4.1.25 ## explicit; go 1.17 github.com/pierrec/lz4/v4 github.com/pierrec/lz4/v4/internal/lz4block @@ -1420,8 +1420,8 @@ github.com/segmentio/fasthash/fnv1a # github.com/sercand/kuberesolver/v6 v6.0.0 ## explicit; go 1.22.0 github.com/sercand/kuberesolver/v6 -# github.com/shirou/gopsutil/v4 v4.25.9 -## explicit; go 1.23.0 +# github.com/shirou/gopsutil/v4 v4.26.1 +## explicit; go 1.24.0 github.com/shirou/gopsutil/v4/common github.com/shirou/gopsutil/v4/cpu github.com/shirou/gopsutil/v4/internal/common @@ -1448,7 +1448,7 @@ github.com/spf13/afero/mem ## explicit; go 1.21.0 github.com/spf13/cast github.com/spf13/cast/internal -# github.com/spf13/cobra v1.10.1 +# github.com/spf13/cobra v1.10.2 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.10 @@ -1488,11 +1488,11 @@ github.com/subosito/gotenv ## explicit; go 1.24 github.com/tinylib/msgp/msgp github.com/tinylib/msgp/msgp/setof -# github.com/tklauser/go-sysconf v0.3.15 -## explicit; go 1.23.0 +# github.com/tklauser/go-sysconf v0.3.16 +## explicit; go 1.24.0 github.com/tklauser/go-sysconf -# github.com/tklauser/numcpus v0.10.0 -## explicit; go 1.23.0 +# github.com/tklauser/numcpus v0.11.0 +## explicit; go 1.24.0 github.com/tklauser/numcpus # github.com/twitchyliquid64/golang-asm v0.15.1 ## explicit; go 1.13 @@ -1649,98 +1649,97 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore ## explicit; go 1.24.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/collector v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/internal/statusutil -# go.opentelemetry.io/collector/client v1.44.0 +# go.opentelemetry.io/collector/client v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/client -# go.opentelemetry.io/collector/component v1.44.0 +# go.opentelemetry.io/collector/component v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/component -# go.opentelemetry.io/collector/component/componentstatus v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/component/componentstatus v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/component/componentstatus -# go.opentelemetry.io/collector/component/componenttest v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/component/componenttest v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/component/componenttest -# go.opentelemetry.io/collector/config/configauth v1.44.0 +# go.opentelemetry.io/collector/config/configauth v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/config/configauth -# go.opentelemetry.io/collector/config/configcompression v1.44.0 +# go.opentelemetry.io/collector/config/configcompression 
v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/config/configcompression -# go.opentelemetry.io/collector/config/configgrpc v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/config/configgrpc v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/config/configgrpc -# go.opentelemetry.io/collector/config/confighttp v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/config/confighttp v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/config/confighttp go.opentelemetry.io/collector/config/confighttp/internal -# go.opentelemetry.io/collector/config/configmiddleware v1.44.0 +# go.opentelemetry.io/collector/config/configmiddleware v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/config/configmiddleware -# go.opentelemetry.io/collector/config/confignet v1.44.0 +# go.opentelemetry.io/collector/config/confignet v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/config/confignet -# go.opentelemetry.io/collector/config/configopaque v1.44.0 +# go.opentelemetry.io/collector/config/configopaque v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/config/configopaque -# go.opentelemetry.io/collector/config/configoptional v1.44.0 +# go.opentelemetry.io/collector/config/configoptional v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/config/configoptional -# go.opentelemetry.io/collector/config/configretry v1.44.0 +# go.opentelemetry.io/collector/config/configretry v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/config/configretry -# go.opentelemetry.io/collector/config/configtelemetry v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/config/configtelemetry v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/config/configtelemetry -# go.opentelemetry.io/collector/config/configtls v1.44.0 +# go.opentelemetry.io/collector/config/configtls v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/config/configtls -# go.opentelemetry.io/collector/confmap v1.44.0 +# go.opentelemetry.io/collector/confmap v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/confmap go.opentelemetry.io/collector/confmap/internal go.opentelemetry.io/collector/confmap/internal/mapstructure go.opentelemetry.io/collector/confmap/internal/third_party/composehook -# go.opentelemetry.io/collector/confmap/xconfmap v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/confmap/xconfmap v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/confmap/xconfmap -# go.opentelemetry.io/collector/connector v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/connector v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/connector go.opentelemetry.io/collector/connector/internal -# go.opentelemetry.io/collector/connector/connectortest v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/connector/connectortest v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/connector/connectortest -# go.opentelemetry.io/collector/connector/xconnector v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/connector/xconnector v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/connector/xconnector -# go.opentelemetry.io/collector/consumer v1.44.0 +# go.opentelemetry.io/collector/consumer v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/consumer go.opentelemetry.io/collector/consumer/internal -# go.opentelemetry.io/collector/consumer/consumererror v0.138.0 -## explicit; go 1.24.0 +# 
go.opentelemetry.io/collector/consumer/consumererror v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/consumer/consumererror go.opentelemetry.io/collector/consumer/consumererror/internal go.opentelemetry.io/collector/consumer/consumererror/internal/statusconversion -# go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/consumer/consumererror/xconsumererror -# go.opentelemetry.io/collector/consumer/consumertest v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/consumer/consumertest v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/consumer/consumertest -# go.opentelemetry.io/collector/consumer/xconsumer v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/consumer/xconsumer v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/consumer/xconsumer -# go.opentelemetry.io/collector/exporter v1.44.0 +# go.opentelemetry.io/collector/exporter v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/exporter -go.opentelemetry.io/collector/exporter/internal/experr -# go.opentelemetry.io/collector/exporter/exporterhelper v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/exporter/exporterhelper v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/exporter/exporterhelper go.opentelemetry.io/collector/exporter/exporterhelper/internal go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr @@ -1750,75 +1749,66 @@ go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch go.opentelemetry.io/collector/exporter/exporterhelper/internal/request go.opentelemetry.io/collector/exporter/exporterhelper/internal/sender go.opentelemetry.io/collector/exporter/exporterhelper/internal/sizer -# go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper -# go.opentelemetry.io/collector/exporter/exportertest v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/exporter/exportertest v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/exporter/exportertest -# go.opentelemetry.io/collector/exporter/otlpexporter v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/exporter/otlpexporter v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/exporter/otlpexporter go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata -# go.opentelemetry.io/collector/exporter/otlphttpexporter v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/exporter/otlphttpexporter v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/exporter/otlphttpexporter go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata -# go.opentelemetry.io/collector/exporter/xexporter v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/exporter/xexporter v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/exporter/xexporter -# go.opentelemetry.io/collector/extension v1.44.0 +# go.opentelemetry.io/collector/extension v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/extension -# go.opentelemetry.io/collector/extension/extensionauth v1.44.0 +# go.opentelemetry.io/collector/extension/extensionauth v1.51.0 ## explicit; go 1.24.0 
go.opentelemetry.io/collector/extension/extensionauth -# go.opentelemetry.io/collector/extension/extensioncapabilities v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/extension/extensioncapabilities v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/extension/extensioncapabilities -# go.opentelemetry.io/collector/extension/extensionmiddleware v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/extension/extensionmiddleware v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/extension/extensionmiddleware -# go.opentelemetry.io/collector/extension/extensiontest v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/extension/extensiontest v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/extension/extensiontest -# go.opentelemetry.io/collector/extension/xextension v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/extension/xextension v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/extension/xextension/storage -# go.opentelemetry.io/collector/featuregate v1.44.0 +# go.opentelemetry.io/collector/featuregate v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/featuregate -# go.opentelemetry.io/collector/internal/fanoutconsumer v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/internal/componentalias v0.146.0 +## explicit; go 1.25.0 +go.opentelemetry.io/collector/internal/componentalias +# go.opentelemetry.io/collector/internal/fanoutconsumer v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/internal/fanoutconsumer -# go.opentelemetry.io/collector/internal/sharedcomponent v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/internal/sharedcomponent v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/internal/sharedcomponent -# go.opentelemetry.io/collector/internal/telemetry v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/internal/telemetry v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/internal/telemetry -go.opentelemetry.io/collector/internal/telemetry/componentattribute -# go.opentelemetry.io/collector/otelcol v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/otelcol v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/otelcol go.opentelemetry.io/collector/otelcol/internal/configunmarshaler go.opentelemetry.io/collector/otelcol/internal/grpclog -# go.opentelemetry.io/collector/pdata v1.44.0 +# go.opentelemetry.io/collector/pdata v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/pdata/internal -go.opentelemetry.io/collector/pdata/internal/data -go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development -go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development -go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1 -go.opentelemetry.io/collector/pdata/internal/grpcencoding go.opentelemetry.io/collector/pdata/internal/json +go.opentelemetry.io/collector/pdata/internal/otelgrpc 
go.opentelemetry.io/collector/pdata/internal/otlp go.opentelemetry.io/collector/pdata/internal/proto go.opentelemetry.io/collector/pdata/pcommon @@ -1828,26 +1818,25 @@ go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp go.opentelemetry.io/collector/pdata/ptrace go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp -# go.opentelemetry.io/collector/pdata/pprofile v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/pdata/pprofile v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/pdata/pprofile go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp -# go.opentelemetry.io/collector/pdata/testdata v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/pdata/testdata v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/pdata/testdata -# go.opentelemetry.io/collector/pdata/xpdata v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/pdata/xpdata v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/pdata/xpdata/pref go.opentelemetry.io/collector/pdata/xpdata/request -go.opentelemetry.io/collector/pdata/xpdata/request/internal -# go.opentelemetry.io/collector/pipeline v1.44.0 +# go.opentelemetry.io/collector/pipeline v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/pipeline go.opentelemetry.io/collector/pipeline/internal/globalsignal -# go.opentelemetry.io/collector/pipeline/xpipeline v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/pipeline/xpipeline v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/pipeline/xpipeline -# go.opentelemetry.io/collector/processor v1.44.0 +# go.opentelemetry.io/collector/processor v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/processor go.opentelemetry.io/collector/processor/internal @@ -1858,18 +1847,17 @@ go.opentelemetry.io/collector/processor/processorhelper/internal/metadata # go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.138.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper -# go.opentelemetry.io/collector/processor/processortest v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/processor/processortest v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/processor/processortest -# go.opentelemetry.io/collector/processor/xprocessor v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/processor/xprocessor v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/processor/xprocessor -# go.opentelemetry.io/collector/receiver v1.44.0 +# go.opentelemetry.io/collector/receiver v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/receiver -go.opentelemetry.io/collector/receiver/internal -# go.opentelemetry.io/collector/receiver/otlpreceiver v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/receiver/otlpreceiver v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/receiver/otlpreceiver go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs @@ -1877,46 +1865,42 @@ go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace -# go.opentelemetry.io/collector/receiver/receiverhelper v0.138.0 -## explicit; go 1.24.0 +# 
go.opentelemetry.io/collector/receiver/receiverhelper v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/receiver/receiverhelper go.opentelemetry.io/collector/receiver/receiverhelper/internal go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata -# go.opentelemetry.io/collector/receiver/receivertest v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/receiver/receivertest v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/receiver/receivertest -# go.opentelemetry.io/collector/receiver/xreceiver v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/receiver/xreceiver v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/receiver/xreceiver # go.opentelemetry.io/collector/semconv v0.128.1-0.20250610090210-188191247685 ## explicit; go 1.23.0 go.opentelemetry.io/collector/semconv/v1.6.1 -# go.opentelemetry.io/collector/service v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/service v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/service go.opentelemetry.io/collector/service/extensions go.opentelemetry.io/collector/service/internal/attribute go.opentelemetry.io/collector/service/internal/builders go.opentelemetry.io/collector/service/internal/capabilityconsumer +go.opentelemetry.io/collector/service/internal/componentattribute go.opentelemetry.io/collector/service/internal/graph go.opentelemetry.io/collector/service/internal/metadata +go.opentelemetry.io/collector/service/internal/metricviews go.opentelemetry.io/collector/service/internal/moduleinfo go.opentelemetry.io/collector/service/internal/obsconsumer go.opentelemetry.io/collector/service/internal/proctelemetry go.opentelemetry.io/collector/service/internal/refconsumer -go.opentelemetry.io/collector/service/internal/resource go.opentelemetry.io/collector/service/internal/status go.opentelemetry.io/collector/service/internal/zpages go.opentelemetry.io/collector/service/pipelines go.opentelemetry.io/collector/service/telemetry -go.opentelemetry.io/collector/service/telemetry/internal/migration -go.opentelemetry.io/collector/service/telemetry/otelconftelemetry -# go.opentelemetry.io/collector/service/hostcapabilities v0.138.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/collector/service/hostcapabilities v0.146.0 +## explicit; go 1.25.0 go.opentelemetry.io/collector/service/hostcapabilities -# go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 -## explicit; go 1.23.0 -go.opentelemetry.io/contrib/bridges/otelzap # go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/bridges/prometheus @@ -1941,11 +1925,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv # go.opentelemetry.io/contrib/otelconf v0.18.0 ## explicit; go 1.23.0 -go.opentelemetry.io/contrib/otelconf/v0.2.0 go.opentelemetry.io/contrib/otelconf/v0.3.0 -# go.opentelemetry.io/contrib/propagators/b3 v1.38.0 -## explicit; go 1.23.0 -go.opentelemetry.io/contrib/propagators/b3 # go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 ## explicit; go 1.22.0 go.opentelemetry.io/contrib/propagators/jaeger @@ -1973,13 +1953,13 @@ go.opentelemetry.io/otel/semconv/v1.18.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.25.0 -go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.27.0 go.opentelemetry.io/otel/semconv/v1.34.0 
go.opentelemetry.io/otel/semconv/v1.37.0 go.opentelemetry.io/otel/semconv/v1.37.0/httpconv go.opentelemetry.io/otel/semconv/v1.37.0/otelconv go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv +go.opentelemetry.io/otel/semconv/v1.38.0 go.opentelemetry.io/otel/semconv/v1.39.0 go.opentelemetry.io/otel/semconv/v1.39.0/otelconv go.opentelemetry.io/otel/semconv/v1.6.1 @@ -2055,12 +2035,10 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric go.opentelemetry.io/otel/exporters/stdout/stdouttrace go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x -# go.opentelemetry.io/otel/log v0.14.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel/log v0.16.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/log go.opentelemetry.io/otel/log/embedded -go.opentelemetry.io/otel/log/global -go.opentelemetry.io/otel/log/internal/global go.opentelemetry.io/otel/log/noop # go.opentelemetry.io/otel/metric v1.40.0 ## explicit; go 1.24.0 @@ -2262,8 +2240,8 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# gonum.org/v1/gonum v0.16.0 -## explicit; go 1.23.0 +# gonum.org/v1/gonum v0.17.0 +## explicit; go 1.24.0 gonum.org/v1/gonum/blas gonum.org/v1/gonum/blas/blas64 gonum.org/v1/gonum/blas/cblas128
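Beyond the version bumps listed above, the vendored semconv v1.38.0 package gains the error_type.go helper shown earlier. The following is a small, hypothetical sketch of its observable behavior; timeoutError is invented purely for illustration, and the printed values follow from the vendored code.

package main

import (
	"errors"
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.38.0"
)

// timeoutError is a made-up type exercising the optional
// ErrorType() string method that semconv.ErrorType checks
// before falling back to reflection.
type timeoutError struct{}

func (timeoutError) Error() string     { return "deadline exceeded" }
func (timeoutError) ErrorType() string { return "timeout" }

func main() {
	// A nil error maps to the _OTHER enum member (ErrorTypeOther).
	fmt.Println(semconv.ErrorType(nil).Value.AsString()) // _OTHER

	// The ErrorType method takes priority when present.
	fmt.Println(semconv.ErrorType(timeoutError{}).Value.AsString()) // timeout

	// Otherwise the value is derived from the concrete type; a pointer
	// type has no name or package path, so reflect.Type.String is used.
	fmt.Println(semconv.ErrorType(errors.New("boom")).Value.AsString()) // *errors.errorString
}

Together with the schema.go change, callers migrating from v1.26.0 pick up both this helper and a SchemaURL of https://opentelemetry.io/schemas/1.38.0.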