From 98d9dde3060c9dd0d300e6128b7d6d6b77a3909b Mon Sep 17 00:00:00 2001
From: Anand Rajagopal
Date: Tue, 4 Mar 2025 22:11:03 +0000
Subject: [PATCH 1/3] Upgrade to Prometheus v0.302.1

Signed-off-by: Anand Rajagopal
---
 go.mod | 84 +-
 go.sum | 234 +++-
 pkg/api/handlers.go | 2 +
 pkg/configs/userconfig/config.go | 4 +-
 pkg/cortex/configinit/init.go | 7 +
 pkg/cortex/cortex.go | 1 +
 pkg/distributor/distributor.go | 8 +-
 pkg/distributor/distributor_test.go | 1 +
 pkg/querier/error_translate_queryable_test.go | 2 +
 pkg/querier/metadata_handler.go | 6 +-
 pkg/querier/metadata_handler_test.go | 2 +-
 pkg/querier/stats_renderer_test.go | 2 +
 .../metadata_merge_querier_test.go | 26 +-
 pkg/ruler/compat.go | 1 +
 pkg/ruler/rulestore/local/local.go | 2 +-
 pkg/util/validation/validate_test.go | 1 +
 vendor/cloud.google.com/go/auth/CHANGES.md | 14 +
 .../internal/impersonate/idtoken.go | 105 +++
 .../go/auth/internal/internal.go | 6 +
 .../go/auth/oauth2adapt/CHANGES.md | 7 +
 .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 7 +
 .../internal/resource/resource_identifier.go | 15 +
 .../sdk/azcore/internal/pollers/op/op.go | 12 +-
 .../sdk/azcore/internal/shared/constants.go | 2 +-
 .../sdk/azcore/runtime/pager.go | 1 +
 .../sdk/azcore/runtime/poller.go | 11 +-
 .../sdk/azidentity/BREAKING_CHANGES.md | 10 +
 .../sdk/azidentity/CHANGELOG.md | 14 +
 .../azure-sdk-for-go/sdk/azidentity/README.md | 24 +-
 .../sdk/azidentity/TOKEN_CACHING.MD | 14 +-
 .../sdk/azidentity/TROUBLESHOOTING.md | 24 +
 .../sdk/azidentity/azidentity.go | 24 +-
 .../sdk/azidentity/azure_cli_credential.go | 6 +-
 .../azure_developer_cli_credential.go | 6 +-
 .../azidentity/chained_token_credential.go | 8 +-
 .../azure-sdk-for-go/sdk/azidentity/ci.yml | 15 +-
 .../sdk/azidentity/confidential_client.go | 2 +-
 .../azidentity/default_azure_credential.go | 14 +-
 .../sdk/azidentity/device_code_credential.go | 5 +-
 .../interactive_browser_credential.go | 5 +-
 .../sdk/azidentity/managed_identity_client.go | 39 +-
 .../sdk/azidentity/public_client.go | 9 +-
 .../sdk/azidentity/test-resources-post.ps1 | 16 +-
 .../sdk/azidentity/version.go | 2 +-
 .../apps/confidential/confidential.go | 9 +-
 .../apps/internal/base/base.go | 30 +-
 .../apps/internal/json/json.go | 54 +-
 .../apps/internal/local/server.go | 3 +-
 .../apps/internal/oauth/oauth.go | 5 +-
 .../internal/oauth/ops/authority/authority.go | 109 ++-
 .../internal/oauth/ops/internal/comm/comm.go | 11 +-
 .../apps/internal/oauth/resolvers.go | 17 +-
 .../aws-sdk-go/aws/session/shared_config.go | 2 +-
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 276 +++---
 .../google/s2a-go/internal/v2/s2av2.go | 13 +-
 .../v2/tlsconfigstore/tlsconfigstore.go | 2 +-
 vendor/github.com/google/s2a-go/s2a.go | 8 +-
 .../github.com/google/s2a-go/s2a_options.go | 77 +-
 .../google/s2a-go/stream/s2a_stream.go | 5 +
 .../gax-go/v2/.release-please-manifest.json | 2 +-
 .../googleapis/gax-go/v2/CHANGES.md | 12 +
 .../googleapis/gax-go/v2/internal/version.go | 2 +-
 .../gax-go/v2/internallog/internallog.go | 2 +-
 .../grpc-gateway/v2/runtime/query.go | 2 +-
 vendor/github.com/miekg/dns/README.md | 1 +
 vendor/github.com/miekg/dns/dnssec.go | 42 +-
 vendor/github.com/miekg/dns/edns.go | 36 +-
 ...useport.go => listen_no_socket_options.go} | 22 +-
 ..._reuseport.go => listen_socket_options.go} | 31 +
 vendor/github.com/miekg/dns/server.go | 1 +
 vendor/github.com/miekg/dns/sig0.go | 3 +-
 vendor/github.com/miekg/dns/version.go | 2 +-
 .../internal/exp/metrics/LICENSE | 201 +++++
 .../internal/exp/metrics/identity/doc.go | 8 +
 .../internal/exp/metrics/identity/metric.go | 71 ++
 .../internal/exp/metrics/identity/resource.go | 31 +
 .../internal/exp/metrics/identity/scope.go | 43 +
 .../internal/exp/metrics/identity/stream.go | 35 +
 .../internal/exp/metrics/identity/strings.go | 24 +
 .../exp/metrics/staleness/priority_queue.go | 111 +++
 .../exp/metrics/staleness/staleness.go | 134 +++
 .../internal/exp/metrics/streams/streams.go | 81 ++
 .../pkg/pdatautil/LICENSE | 201 +++++
 .../pkg/pdatautil/Makefile | 1 +
 .../pkg/pdatautil/hash.go | 207 +++++
 .../pkg/pdatautil/metadata.yaml | 3 +
 .../deltatocumulativeprocessor/LICENSE | 201 +++++
 .../deltatocumulativeprocessor/Makefile | 1 +
 .../deltatocumulativeprocessor/README.md | 42 +
 .../deltatocumulativeprocessor/config.go | 48 ++
 .../deltatocumulativeprocessor/doc.go | 8 +
 .../documentation.md | 79 ++
 .../deltatocumulativeprocessor/factory.go | 38 +
 .../internal/data/add.go | 114 +++
 .../internal/data/data.go | 26 +
 .../internal/data/expo/expo.go | 52 ++
 .../internal/data/expo/merge.go | 37 +
 .../internal/data/expo/ord.go | 16 +
 .../internal/data/expo/scale.go | 115 +++
 .../internal/data/expo/zero.go | 68 ++
 .../internal/delta/delta.go | 65 ++
 .../internal/metadata/generated_status.go | 16 +
 .../internal/metadata/generated_telemetry.go | 135 +++
 .../internal/metrics/data.go | 74 ++
 .../internal/metrics/iter.go | 20 +
 .../internal/metrics/metrics.go | 110 +++
 .../internal/putil/pslice/pslice.go | 31 +
 .../internal/telemetry/attr.go | 12 +
 .../internal/telemetry/metrics.go | 71 +
 .../deltatocumulativeprocessor/metadata.yaml | 78 ++
 .../deltatocumulativeprocessor/processor.go | 214 +++++
 .../prometheus/client_golang/api/client.go | 8 +
 .../client_golang/prometheus/atomic_update.go | 50 ++
 .../client_golang/prometheus/counter.go | 10 +-
 .../client_golang/prometheus/desc.go | 15 +-
 .../client_golang/prometheus/gauge.go | 10 +-
 .../prometheus/go_collector_latest.go | 2 +-
 .../client_golang/prometheus/histogram.go | 259 +++++-
 .../prometheus/internal/difflib.go | 19 +-
 .../prometheus/internal/go_runtime_metrics.go | 3 +-
 .../client_golang/prometheus/metric.go | 24 +-
 .../prometheus/process_collector.go | 31 +-
 .../prometheus/process_collector_cgo_darwin.c | 84 ++
 .../process_collector_cgo_darwin.go | 51 ++
 .../prometheus/process_collector_darwin.go | 128 +++
 .../process_collector_nocgo_darwin.go | 39 +
 .../prometheus/process_collector_other.go | 20 +-
 .../prometheus/process_collector_wasip1.go | 26 -
 ...r_js.go => process_collector_wasip1_js.go} | 17 +-
 .../prometheus/process_collector_windows.go | 21 +-
 .../client_golang/prometheus/promhttp/http.go | 23 +-
 .../client_golang/prometheus/summary.go | 32 +-
 .../validations/duplicate_validations.go | 4 +-
 .../prometheus/testutil/testutil.go | 8 +-
 .../prometheus/common/config/http_config.go | 22 +-
 .../common/expfmt/openmetrics_create.go | 4 +-
 .../prometheus/common/model/metric.go | 14 +-
 .../prometheus/prometheus/config/config.go | 2 +-
 .../prometheus/model/relabel/relabel.go | 21 +-
 .../prometheus/model/rulefmt/rulefmt.go | 18 +-
 .../prometheus/model/textparse/nhcbparse.go | 90 +-
 .../prometheus/notifier/notifier.go | 2 +-
 .../prometheus/prometheus/promql/engine.go | 34 +-
 .../prometheus/prometheus/promql/functions.go | 126 ++-
 .../promql/parser/generated_parser.y | 11 +-
 .../promql/parser/generated_parser.y.go | 11 +-
 .../prometheus/prometheus/promql/quantile.go | 1 -
 .../prometheus/prometheus/rules/alerting.go | 53 +-
 .../prometheus/prometheus/rules/group.go | 134 +--
 .../prometheus/prometheus/rules/manager.go | 115 ++-
 .../prometheus/prometheus/rules/recording.go | 54 +-
 .../prometheus/prometheus/rules/rule.go | 14 +-
 .../prometheus/prometheus/scrape/manager.go | 6 +-
 .../prometheus/prometheus/scrape/scrape.go | 373 +++++---
 .../prometheus/prometheus/scrape/target.go | 130 +--
 .../prometheus/storage/remote/client.go | 14 +-
 .../storage/remote/metadata_watcher.go | 2 +-
 .../prometheus/helpers_from_stdlib.go | 106 ---
 ...rmalize_name.go => metric_name_builder.go} | 258 +++---
 .../prometheusremotewrite/metrics_to_prw.go | 7 +-
 .../storage/remote/queue_manager.go | 13 +-
 .../prometheus/storage/remote/read_handler.go | 8 +-
 .../storage/remote/write_handler.go | 173 +++-
 .../prometheus/template/template.go | 4 +-
 .../prometheus/prometheus/tsdb/block.go | 13 +-
 .../prometheus/prometheus/tsdb/db.go | 2 +-
 .../prometheus/prometheus/tsdb/head.go | 12 +-
 .../prometheus/prometheus/tsdb/head_append.go | 52 +-
 .../prometheus/prometheus/tsdb/head_read.go | 15 +-
 .../prometheus/prometheus/tsdb/head_wal.go | 10 +-
 .../prometheus/tsdb/index/postings.go | 63 +-
 .../prometheus/tsdb/record/record.go | 121 ++-
 .../prometheus/prometheus/tsdb/testutil.go | 34 +-
 .../prometheus/tsdb/tsdbutil/histogram.go | 22 +
 .../prometheus/tsdb/wlog/checkpoint.go | 49 +-
 .../prometheus/tsdb/wlog/watcher.go | 6 +-
 .../util/annotations/annotations.go | 8 +
 .../prometheus/util/logging/file.go | 57 +-
 .../prometheus/util/testutil/port.go | 8 +-
 .../prometheus/prometheus/web/api/v1/api.go | 116 ++-
 vendor/github.com/prometheus/sigv4/.yamllint | 10 +-
 .../prometheus/sigv4/Makefile.common | 2 +-
 .../collector/component/LICENSE | 202 +++++
 .../collector/component/Makefile | 1 +
 .../collector/component/build_info.go | 26 +
 .../collector/component/component.go | 207 +++++
 .../collector/component/config.go | 95 ++
 .../collector/component/doc.go | 8 +
 .../collector/component/host.go | 21 +
 .../collector/component/identifiable.go | 177 ++++
 .../collector/component/telemetry.go | 34 +
 .../collector/config/configtelemetry/LICENSE | 202 +++++
 .../collector/config/configtelemetry/Makefile | 1 +
 .../config/configtelemetry/configtelemetry.go | 73 ++
 .../collector/config/configtelemetry/doc.go | 47 +
 .../collector/consumer/LICENSE | 202 +++++
 .../collector/consumer/Makefile | 1 +
 .../collector/consumer/consumer.go | 26 +
 .../collector/consumer/doc.go | 5 +
 .../collector/consumer/internal/consumer.go | 50 ++
 .../collector/consumer/logs.go | 43 +
 .../collector/consumer/metrics.go | 43 +
 .../collector/consumer/traces.go | 43 +
 .../internal/data/protogen/logs/v1/logs.pb.go | 192 +++--
 .../data/protogen/metrics/v1/metrics.pb.go | 12 +-
 .../profiles/v1development/profiles.pb.go | 815 +++++++-----------
 .../data/protogen/trace/v1/trace.pb.go | 6 +-
 .../collector/pdata/plog/encoding.go | 31 +
 .../pdata/plog/generated_logrecord.go | 173 ++++
 .../pdata/plog/generated_logrecordslice.go | 152 ++++
 .../pdata/plog/generated_resourcelogs.go | 76 ++
 .../pdata/plog/generated_resourcelogsslice.go | 152 ++++
 .../pdata/plog/generated_scopelogs.go | 76 ++
 .../pdata/plog/generated_scopelogsslice.go | 152 ++++
 .../collector/pdata/plog/json.go | 135 +++
 .../collector/pdata/plog/log_record_flags.go | 28 +
 .../collector/pdata/plog/logs.go | 66 ++
 .../collector/pdata/plog/pb.go | 33 +
 .../collector/pdata/plog/severity_number.go | 96 +++
 .../collector/pdata/ptrace/encoding.go | 31 +
 .../pdata/ptrace/generated_resourcespans.go | 76 ++
 .../ptrace/generated_resourcespansslice.go | 152 ++++
 .../pdata/ptrace/generated_scopespans.go | 76 ++
 .../pdata/ptrace/generated_scopespansslice.go | 152 ++++
 .../collector/pdata/ptrace/generated_span.go | 216 +++++
 .../pdata/ptrace/generated_spanevent.go | 95 ++
 .../pdata/ptrace/generated_spaneventslice.go | 152 ++++
 .../pdata/ptrace/generated_spanlink.go | 115 +++
 .../pdata/ptrace/generated_spanlinkslice.go | 152 ++++
 .../pdata/ptrace/generated_spanslice.go | 152 ++++
 .../pdata/ptrace/generated_status.go | 76 ++
 .../collector/pdata/ptrace/json.go | 217 +++++
 .../collector/pdata/ptrace/pb.go | 31 +
 .../collector/pdata/ptrace/span_kind.go | 54 ++
 .../collector/pdata/ptrace/status_code.go | 31 +
 .../collector/pdata/ptrace/traces.go | 65 ++
 .../collector/pipeline/LICENSE | 202 +++++
 .../collector/pipeline/Makefile | 1 +
 .../pipeline/internal/globalsignal/signal.go | 51 ++
 .../collector/pipeline/pipeline.go | 131 +++
 .../collector/pipeline/signal.go | 22 +
 .../collector/processor/LICENSE | 202 +++++
 .../collector/processor/Makefile | 1 +
 .../collector/processor/README.md | 109 +++
 .../collector/processor/processor.go | 205 +++++
 .../http/httptrace/otelhttptrace/version.go | 2 +-
 .../net/http/otelhttp/handler.go | 45 +-
 .../net/http/otelhttp/internal/semconv/env.go | 117 ++-
 .../net/http/otelhttp/internal/semconv/gen.go | 14 +
 .../otelhttp/internal/semconv/httpconv.go | 171 ++++
 .../http/otelhttp/internal/semconv/util.go | 13 +
 .../http/otelhttp/internal/semconv/v1.20.0.go | 18 +-
 .../net/http/otelhttp/transport.go | 2 +-
 .../net/http/otelhttp/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 21 +-
 vendor/go.opentelemetry.io/otel/README.md | 2 +-
 vendor/go.opentelemetry.io/otel/RELEASING.md | 6 +-
 .../otlp/otlptrace/otlptracegrpc/client.go | 2 +-
 .../otel/exporters/otlp/otlptrace/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/renovate.json | 6 -
 .../go.opentelemetry.io/otel/sdk/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 6 +-
 .../proto/otlp/trace/v1/trace.pb.go | 6 +-
 vendor/go.uber.org/zap/.golangci.yml | 77 ++
 vendor/go.uber.org/zap/.readme.tmpl | 10 +-
 vendor/go.uber.org/zap/CHANGELOG.md | 343 +++++---
 vendor/go.uber.org/zap/CONTRIBUTING.md | 21 +-
 .../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0
 vendor/go.uber.org/zap/Makefile | 87 +-
 vendor/go.uber.org/zap/README.md | 63 +-
 vendor/go.uber.org/zap/array.go | 127 +++
 vendor/go.uber.org/zap/buffer/buffer.go | 5 +
 vendor/go.uber.org/zap/buffer/pool.go | 20 +-
 vendor/go.uber.org/zap/config.go | 88 +-
 vendor/go.uber.org/zap/doc.go | 60 +-
 vendor/go.uber.org/zap/encoder.go | 2 +-
 vendor/go.uber.org/zap/error.go | 14 +-
 vendor/go.uber.org/zap/field.go | 196 +++--
 vendor/go.uber.org/zap/http_handler.go | 44 +-
 vendor/go.uber.org/zap/internal/exit/exit.go | 22 +-
 .../go.uber.org/zap/internal/level_enabler.go | 37 +
 vendor/go.uber.org/zap/internal/pool/pool.go | 58 ++
 .../stacktrace/stack.go} | 83 +-
 vendor/go.uber.org/zap/level.go | 12 +-
 vendor/go.uber.org/zap/logger.go | 112 ++-
 vendor/go.uber.org/zap/options.go | 36 +-
 vendor/go.uber.org/zap/sink.go | 101 ++-
 vendor/go.uber.org/zap/sugar.go | 239 ++++-
 vendor/go.uber.org/zap/writer.go | 23 +-
 .../zap/zapcore/buffered_write_syncer.go | 31 +
 .../zap/zapcore/console_encoder.go | 16 +-
 vendor/go.uber.org/zap/zapcore/core.go | 15 +-
 vendor/go.uber.org/zap/zapcore/encoder.go | 24 +-
 vendor/go.uber.org/zap/zapcore/entry.go | 94 +-
 vendor/go.uber.org/zap/zapcore/error.go | 28 +-
 vendor/go.uber.org/zap/zapcore/field.go | 2 +-
 vendor/go.uber.org/zap/zapcore/hook.go | 9 +
 .../go.uber.org/zap/zapcore/increase_level.go | 9 +
 .../go.uber.org/zap/zapcore/json_encoder.go | 161 ++--
 vendor/go.uber.org/zap/zapcore/lazy_with.go | 54 ++
 vendor/go.uber.org/zap/zapcore/level.go | 42 +
 vendor/go.uber.org/zap/zapcore/sampler.go | 34 +-
 vendor/go.uber.org/zap/zapcore/tee.go | 17 +-
 vendor/go.uber.org/zap/zapgrpc/zapgrpc.go | 36 +-
 vendor/golang.org/x/crypto/pkcs12/crypto.go | 2 +-
 vendor/golang.org/x/net/http2/config.go | 2 +-
 vendor/golang.org/x/net/http2/config_go124.go | 2 +-
 vendor/golang.org/x/net/http2/transport.go | 13 +-
 .../clientcredentials/clientcredentials.go | 2 +-
 .../x/oauth2/google/externalaccount/aws.go | 2 +-
 .../google/externalaccount/basecredentials.go | 2 +-
 vendor/golang.org/x/oauth2/oauth2.go | 2 +-
 .../x/sys/unix/syscall_dragonfly.go | 12 +
 .../golang.org/x/sys/windows/dll_windows.go | 11 +-
 .../golang.org/x/tools/go/packages/golist.go | 5 +
 .../x/tools/go/packages/loadmode_string.go | 1 +
 .../x/tools/go/packages/packages.go | 7 +
 .../x/tools/go/types/typeutil/map.go | 246 +++---
 .../x/tools/internal/gcimporter/exportdata.go | 415 +++++++--
 .../x/tools/internal/gcimporter/gcimporter.go | 179 +---
 .../x/tools/internal/gcimporter/iimport.go | 8 +-
 .../x/tools/internal/gcimporter/support.go | 30 +
 .../tools/internal/gcimporter/ureader_yes.go | 9 +-
 .../x/tools/internal/stdlib/manifest.go | 219 +++++
 .../x/tools/internal/typeparams/common.go | 72 --
 .../tools/internal/typesinternal/qualifier.go | 46 +
 .../x/tools/internal/typesinternal/recv.go | 2 +
 .../x/tools/internal/typesinternal/types.go | 1 +
 .../tools/internal/typesinternal/zerovalue.go | 254 ++++--
 .../api/googleapi/googleapi.go | 38 +-
 .../iamcredentials/v1/iamcredentials-gen.go | 2 +-
 .../api/internal/gensupport/resumable.go | 18 +-
 .../google.golang.org/api/internal/version.go | 2 +-
 .../api/storage/v1/storage-gen.go | 2 +-
 .../rpc/errdetails/error_details.pb.go | 128 +--
 .../protobuf/encoding/protojson/decode.go | 2 +-
 .../protobuf/encoding/prototext/decode.go | 2 +-
 .../protobuf/internal/flags/flags.go | 5 +
 .../protobuf/internal/impl/codec_map.go | 14 +-
 .../protobuf/internal/impl/codec_map_go111.go | 38 -
 .../protobuf/internal/impl/codec_map_go112.go | 12 -
 .../protobuf/internal/impl/codec_message.go | 4 +-
 .../internal/impl/codec_message_opaque.go | 6 +-
 .../protobuf/internal/impl/convert_map.go | 2 +-
 .../protobuf/internal/impl/message.go | 12 +-
 .../protobuf/internal/impl/message_opaque.go | 36 +-
 .../internal/impl/message_reflect_field.go | 56 +-
 .../protobuf/internal/impl/pointer_unsafe.go | 2 +-
 .../protobuf/internal/version/version.go | 2 +-
 .../protobuf/proto/decode.go | 2 +-
 .../reflect/protodesc/desc_validate.go | 2 +-
 .../protobuf/reflect/protodesc/editions.go | 48 +-
 .../types/descriptorpb/descriptor.pb.go | 12 +-
 .../types/gofeaturespb/go_features.pb.go | 12 +-
 .../protobuf/types/known/anypb/any.pb.go | 12 +-
 .../types/known/durationpb/duration.pb.go | 12 +-
 .../protobuf/types/known/emptypb/empty.pb.go | 12 +-
 .../types/known/fieldmaskpb/field_mask.pb.go | 12 +-
 .../types/known/structpb/struct.pb.go | 12 +-
 .../types/known/timestamppb/timestamp.pb.go | 12 +-
 .../types/known/wrapperspb/wrappers.pb.go | 12 +-
 vendor/modules.txt | 140 +-
 364 files changed, 15572 insertions(+), 3722 deletions(-)
 create mode 100644 pkg/cortex/configinit/init.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
 rename vendor/github.com/miekg/dns/{listen_no_reuseport.go => listen_no_socket_options.go} (61%)
 rename vendor/github.com/miekg/dns/{listen_reuseport.go => listen_socket_options.go} (66%)
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/LICENSE
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/doc.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/metric.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/resource.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/scope.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/stream.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/strings.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/priority_queue.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/staleness.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/LICENSE
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/Makefile
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/LICENSE
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/Makefile
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/README.md
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/doc.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expo.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/ord.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_status.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/data.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/iter.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/metrics.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/attr.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
 create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go
 delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
 rename vendor/github.com/prometheus/client_golang/prometheus/{process_collector_js.go => process_collector_wasip1_js.go} (57%)
 delete mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go
 rename vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/{normalize_name.go => metric_name_builder.go} (54%)
 create mode 100644 vendor/go.opentelemetry.io/collector/component/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/collector/component/Makefile
 create mode 100644 vendor/go.opentelemetry.io/collector/component/build_info.go
 create mode 100644 vendor/go.opentelemetry.io/collector/component/component.go
 create mode 100644 vendor/go.opentelemetry.io/collector/component/config.go
 create mode 100644 vendor/go.opentelemetry.io/collector/component/doc.go
 create mode 100644 vendor/go.opentelemetry.io/collector/component/host.go
 create mode 100644 vendor/go.opentelemetry.io/collector/component/identifiable.go
 create mode 100644 vendor/go.opentelemetry.io/collector/component/telemetry.go
 create mode 100644 vendor/go.opentelemetry.io/collector/config/configtelemetry/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/collector/config/configtelemetry/Makefile
 create mode 100644 vendor/go.opentelemetry.io/collector/config/configtelemetry/configtelemetry.go
 create mode 100644 vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go
 create mode 100644 vendor/go.opentelemetry.io/collector/consumer/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/collector/consumer/Makefile
 create mode 100644 vendor/go.opentelemetry.io/collector/consumer/consumer.go
 create mode 100644 vendor/go.opentelemetry.io/collector/consumer/doc.go
 create mode 100644 vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go
 create mode 100644 vendor/go.opentelemetry.io/collector/consumer/logs.go
 create mode 100644 vendor/go.opentelemetry.io/collector/consumer/metrics.go
 create mode 100644 vendor/go.opentelemetry.io/collector/consumer/traces.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/json.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/log_record_flags.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/logs.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/pb.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/encoding.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pdata/ptrace/traces.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/Makefile
 create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/pipeline.go
 create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/signal.go
 create mode 100644 vendor/go.opentelemetry.io/collector/processor/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/collector/processor/Makefile
 create mode 100644 vendor/go.opentelemetry.io/collector/processor/README.md
 create mode 100644 vendor/go.opentelemetry.io/collector/processor/processor.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
 create mode 100644 vendor/go.uber.org/zap/.golangci.yml
 rename vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%)
 create mode 100644 vendor/go.uber.org/zap/internal/level_enabler.go
 create mode 100644 vendor/go.uber.org/zap/internal/pool/pool.go
 rename vendor/go.uber.org/zap/{stacktrace.go => internal/stacktrace/stack.go} (72%)
 create mode 100644 vendor/go.uber.org/zap/zapcore/lazy_with.go
 create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support.go
 create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go

diff --git a/go.mod b/go.mod
index b06af603af4..a3cfad1ee12 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
 github.com/alicebob/miniredis/v2 v2.34.0
 github.com/armon/go-metrics v0.4.1
- github.com/aws/aws-sdk-go v1.55.5
+ github.com/aws/aws-sdk-go v1.55.6
 github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874
 github.com/cortexproject/promqlsmith v0.0.0-20250203072244-cbb5738d00ca
 github.com/dustin/go-humanize v1.0.1
@@ -41,11 +41,11 @@ require (
 github.com/opentracing/opentracing-go v1.2.0
 github.com/pkg/errors v0.9.1
 github.com/prometheus/alertmanager v0.28.0
- github.com/prometheus/client_golang v1.20.5
+ github.com/prometheus/client_golang v1.21.0-rc.0
 github.com/prometheus/client_model v0.6.1
- github.com/prometheus/common v0.61.0
+ github.com/prometheus/common v0.62.0
 // Prometheus maps version 2.x.y to tags v0.x.y.
- github.com/prometheus/prometheus v0.301.0
+ github.com/prometheus/prometheus v0.302.1
 github.com/segmentio/fasthash v1.0.3
 github.com/sony/gobreaker v1.0.0
 github.com/spf13/afero v1.11.0
@@ -59,17 +59,17 @@ require (
 go.etcd.io/etcd/client/pkg/v3 v3.5.17
 go.etcd.io/etcd/client/v3 v3.5.17
 go.opentelemetry.io/contrib/propagators/aws v1.33.0
- go.opentelemetry.io/otel v1.33.0
+ go.opentelemetry.io/otel v1.34.0
 go.opentelemetry.io/otel/bridge/opentracing v1.33.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
- go.opentelemetry.io/otel/sdk v1.33.0
- go.opentelemetry.io/otel/trace v1.33.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0
+ go.opentelemetry.io/otel/sdk v1.34.0
+ go.opentelemetry.io/otel/trace v1.34.0
 go.uber.org/atomic v1.11.0
- golang.org/x/net v0.33.0
+ golang.org/x/net v0.34.0
 golang.org/x/sync v0.10.0
- golang.org/x/time v0.8.0
- google.golang.org/grpc v1.69.0
+ golang.org/x/time v0.9.0
+ google.golang.org/grpc v1.70.0
 gopkg.in/yaml.v2 v2.4.0
 gopkg.in/yaml.v3 v3.0.1
 )
@@ -83,23 +83,23 @@ require (
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 github.com/sercand/kuberesolver/v5 v5.1.1
 github.com/tjhop/slog-gokit v0.1.2
- go.opentelemetry.io/collector/pdata v1.22.0
+ go.opentelemetry.io/collector/pdata v1.24.0
 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
- google.golang.org/protobuf v1.36.1
+ google.golang.org/protobuf v1.36.4
 )

 require (
 cloud.google.com/go v0.115.1 // indirect
- cloud.google.com/go/auth v0.13.0 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
+ cloud.google.com/go/auth v0.14.0 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
 cloud.google.com/go/compute/metadata v0.6.0 // indirect
 cloud.google.com/go/iam v1.1.13 // indirect
 cloud.google.com/go/storage v1.43.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 // indirect
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
 github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -115,7 +115,6 @@ require (
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 // indirect
 github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect
 github.com/aws/smithy-go v1.13.3 // indirect
- github.com/benbjohnson/clock v1.3.5 // indirect
 github.com/beorn7/perks v1.0.1 // indirect
 github.com/blang/semver/v4 v4.0.0 // indirect
 github.com/caio/go-tdigest v3.1.0+incompatible // indirect
@@ -131,6 +130,7 @@ require (
 github.com/docker/go-units v0.5.0 // indirect
 github.com/edsrzf/mmap-go v1.2.0 // indirect
 github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd // indirect
+ github.com/envoyproxy/go-control-plane v0.12.0 // indirect
 github.com/fatih/color v1.16.0 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
 github.com/fsnotify/fsnotify v1.8.0 // indirect
@@ -153,12 +153,12 @@ require (
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 github.com/google/btree v1.1.2 // indirect
 github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
- github.com/google/s2a-go v0.1.8 // indirect
+ github.com/google/s2a-go v0.1.9 // indirect
 github.com/google/uuid v1.6.0 // indirect
 github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
- github.com/googleapis/gax-go/v2 v2.14.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.1 // indirect
 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
 github.com/hashicorp/errwrap v1.1.0 // indirect
 github.com/hashicorp/go-hclog v1.6.3 // indirect
 github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@@ -183,7 +183,7 @@ require (
 github.com/mdlayher/socket v0.4.1 // indirect
 github.com/mdlayher/vsock v1.2.1 // indirect
 github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect
- github.com/miekg/dns v1.1.62 // indirect
+ github.com/miekg/dns v1.1.63 // indirect
 github.com/minio/md5-simd v1.1.2 // indirect
 github.com/minio/sha256-simd v1.0.1 // indirect
 github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -193,13 +193,16 @@ require (
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 github.com/ncw/swift v1.0.53 // indirect
 github.com/oklog/run v1.1.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 // indirect
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0 // indirect
 github.com/prometheus/common/sigv4 v0.1.0 // indirect
 github.com/prometheus/exporter-toolkit v0.13.2 // indirect
 github.com/prometheus/procfs v0.15.1 // indirect
- github.com/prometheus/sigv4 v0.1.0 // indirect
+ github.com/prometheus/sigv4 v0.1.1 // indirect
 github.com/redis/rueidis v1.0.45-alpha.1 // indirect
 github.com/rs/cors v1.11.1 // indirect
 github.com/rs/xid v1.6.0 // indirect
@@ -219,32 +222,37 @@ require (
 go.mongodb.org/mongo-driver v1.14.0 // indirect
 go.opencensus.io v0.24.0 // indirect
 go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/collector/semconv v0.116.0 // indirect
+ go.opentelemetry.io/collector/component v0.118.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect
+ go.opentelemetry.io/collector/consumer v1.24.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.118.0 // indirect
+ go.opentelemetry.io/collector/processor v0.118.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.118.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
 go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 // indirect
 go.opentelemetry.io/contrib/propagators/b3 v1.29.0 // indirect
 go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 // indirect
 go.opentelemetry.io/contrib/propagators/ot v1.29.0 // indirect
- go.opentelemetry.io/otel/metric v1.33.0 // indirect
- go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+ go.opentelemetry.io/otel/metric v1.34.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.5.0 // indirect
 go.uber.org/goleak v1.3.0 // indirect
 go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.21.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
 go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect
 go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect
- golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/crypto v0.32.0 // indirect
 golang.org/x/mod v0.22.0 // indirect
- golang.org/x/oauth2 v0.24.0 // indirect
- golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/oauth2 v0.25.0 // indirect
+ golang.org/x/sys v0.29.0 // indirect
 golang.org/x/text v0.21.0 // indirect
- golang.org/x/tools v0.28.0 // indirect
+ golang.org/x/tools v0.29.0 // indirect
 gonum.org/v1/gonum v0.15.0 // indirect
- google.golang.org/api v0.213.0 // indirect
+ google.golang.org/api v0.218.0 // indirect
 google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
 gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
 gopkg.in/telebot.v3 v3.3.8 // indirect
 k8s.io/apimachinery v0.31.3 // indirect
diff --git a/go.sum b/go.sum
index 51d7d8bf1ac..8fd28a4599e 100644
--- a/go.sum
+++ b/go.sum
@@ -117,10 +117,10 @@ cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEar
 cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
 cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
 cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0=
-cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
-cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
-cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
-cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
+cloud.google.com/go/auth v0.14.0 h1:A5C4dKV/Spdvxcl0ggWwWEzzP7AZMJSEIgrkngwhGYM=
+cloud.google.com/go/auth v0.14.0/go.mod h1:CYsoRL1PdiDuqeQpZE0bP2pnPrGqFcOkI0nldEQis+A=
+cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
+cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
 cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
 cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
 cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
@@ -767,12 +767,12 @@ cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvo
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
 git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1 h1:Bk5uOhSAenHyR5P61D/NzeQCv+4fEVV8mOkJ82NqpWw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1/go.mod h1:QZ4pw3or1WPmRBxf0cHd1tknzrT54WPBOQoGutCPvSU=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
@@ -787,8 +787,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
 github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
@@ -843,8 +843,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
-github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
+github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI=
 github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk=
 github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
@@ -883,8 +883,6 @@ github.com/baidubce/bce-sdk-go v0.9.111/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTF
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
-github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -991,17 +989,16 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr
 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
 github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
 github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
+github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI=
 github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
-github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
-github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
 github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
 github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
 github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
-github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
-github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -1085,9 +1082,11 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
 github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
 github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
 github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-resty/resty/v2 v2.15.3 h1:bqff+hcqAflpiF591hhJzNdkRsFhlB96CYfBwSFvql8=
-github.com/go-resty/resty/v2 v2.15.3/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU=
+github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E=
+github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
 github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
@@ -1215,8 +1214,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
 github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
 github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
 github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
-github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
-github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -1242,12 +1241,12 @@ github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38
 github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
 github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
 github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
-github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
-github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
 github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw=
-github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud/v2 v2.4.0 h1:XhP5tVEH3ni66NSNK1+0iSO6kaGPH/6srtx6Cr+8eCg=
+github.com/gophercloud/gophercloud/v2 v2.4.0/go.mod h1:uJWNpTgJPSl2gyzJqcU/pIAhFUWvIkp8eE8M15n9rs4=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -1265,8 +1264,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
 github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
 github.com/hashicorp/consul/api v1.31.0 h1:32BUNLembeSRek0G/ZAM6WNfdEwYdYo8oQ4+JoqGkNQ=
 github.com/hashicorp/consul/api v1.31.0/go.mod h1:2ZGIiXM3A610NmDULmCHd/aqBJj8CkMfOhswhOafxRg=
@@ -1327,8 +1326,8 @@ github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpT
 github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.17.1 h1:DPi019dv0WCiECEmtcuTgc//hBvnxESb6QlJnAb4a04=
-github.com/hetznercloud/hcloud-go/v2 v2.17.1/go.mod h1:6ygmBba+FdawR2lLp/d9uJljY2k0dTYthprrI8usdLw=
+github.com/hetznercloud/hcloud-go/v2 v2.18.0 h1:BemrVGeWI8Kn/pvaC1jBsHZxQMnRqOydS7Ju4BERB4Q=
+github.com/hetznercloud/hcloud-go/v2 v2.18.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
 github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible h1:tKTaPHNVwikS3I1rdyf1INNvgJXWSf/+TzqsiGbrgnQ=
 github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
 github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
@@ -1377,6 +1376,12 @@ github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
 github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
+github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
+github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
+github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
+github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
+github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1407,8 +1412,8 @@ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/linode/linodego v1.43.0 h1:sGeBB3caZt7vKBoPS5p4AVzmlG4JoqQOdigIibx3egk=
-github.com/linode/linodego v1.43.0/go.mod h1:n4TMFu1UVNala+icHqrTEFFaicYSF74cSAUG5zkTwfA=
+github.com/linode/linodego v1.46.0 h1:+uOG4SD2MIrhbrLrvOD5HrbdLN3D19Wgn3MgdUNQjeU=
+github.com/linode/linodego v1.46.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
 github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
 github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
 github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
@@ -1450,8 +1455,8 @@ github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a h1:0usWxe5SGXKQo
 github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q=
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
-github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
+github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
+github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
 github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@@ -1461,6 +1466,8 @@ github.com/minio/minio-go/v7 v7.0.82/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
@@ -1471,6 +1478,8 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
 github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= @@ -1504,6 +1513,14 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 h1:Kxk5Ral+Dc6VB9UmTketVjs+rbMZP8JxQ4SXDx4RivQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0/go.mod h1:ctT6oQmGmWGGGgUIKyx2fDwqz77N9+04gqKkDyAzKCg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 h1:ZBmLuipJv7BT9fho/2yAFsS8AtMsCOCe4ON8oqkX3n8= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0/go.mod h1:f0GdYWGxUunyRZ088gHnoX78pc/gZc3dQlRtidiGXzg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -1538,8 +1555,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1556,8 +1571,8 @@ github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang 
v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.21.0-rc.0 h1:bR+RxBlwcr4q8hXkgSOA/J18j6n0/qH0Gb0DH+8c+RY= +github.com/prometheus/client_golang v1.21.0-rc.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1571,8 +1586,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= -github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ= @@ -1584,12 +1599,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.301.0 h1:0z8dgegmILivNomCd79RKvVkIols8vBGPKmcIBc7OyY= -github.com/prometheus/prometheus v0.301.0/go.mod h1:BJLjWCKNfRfjp7Q48DrAjARnCi7GhfUVvUFEAWTssZM= -github.com/prometheus/sigv4 v0.1.0 h1:FgxH+m1qf9dGQ4w8Dd6VkthmpFQfGTzUeavMoQeG1LA= -github.com/prometheus/sigv4 v0.1.0/go.mod h1:doosPW9dOitMzYe2I2BN0jZqUuBrGPbXrNsTScN18iU= -github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= -github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/prometheus/prometheus v0.302.1 h1:xqVdrwrB4WNpdgJqxsz5loqFWNUZitsK8myqLuSZ6Ag= +github.com/prometheus/prometheus v0.302.1/go.mod h1:YcyCoTbUR/TM8rY3Aoeqr0AWTu/pu1Ehh+trpX3eRzg= +github.com/prometheus/sigv4 v0.1.1 h1:UJxjOqVcXctZlwDjpUpZ2OiMWJdFijgSofwLzO1Xk0Q= +github.com/prometheus/sigv4 v0.1.1/go.mod h1:RAmWVKqx0bwi0Qm4lrKMXFM0nhpesBcenfCtz9qRyH8= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/redis/rueidis v1.0.45-alpha.1 h1:69Bu1l7gVC/qDYuGGwMwGg2rjOjSyxESol/Zila62gY= github.com/redis/rueidis v1.0.45-alpha.1/go.mod 
h1:q7BfhDaPt7xxwy2nv2RqQO12/mmHflDjebpcNwWFjms= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1727,16 +1742,44 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= -go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= -go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y= -go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w= +go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/processor v0.118.0 
h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 h1:xwH3QJv6zL4u+gkPUu59NeT1Gyw9nScWT8FQpKLUJJI= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0/go.mod h1:uosvgpqTcTXtcPQORTbEkZNDQTCDOgTz1fe6aLSyqrQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0 h1:iQZYNQ7WwIcYXzOPR46FQv9O0dS1PW16RjvR0TjDOe8= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0/go.mod h1:54CaSNqYEXvpzDh8KPjiMVoWm60t5R0dZRt0leEPgAs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 h1:h/O1OcNbqrFilsMKfG6MJWWpx8gzCDfn9D+1W7lU3lE= go.opentelemetry.io/contrib/propagators/autoprop v0.54.0/go.mod h1:VIaPlErTgbng1UhrMA4N6Yy+f94PLA/qRPOCMATdoCs= go.opentelemetry.io/contrib/propagators/aws v1.33.0 h1:MefPfPIut0IxEiQRK1qVv5AFADBOwizl189+m7QhpFg= @@ -1747,31 +1790,32 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 h1:+YPiqF5rR6PqHBlmEFLPum go.opentelemetry.io/contrib/propagators/jaeger v1.29.0/go.mod h1:6PD7q7qquWSp3Z4HeM3e/2ipRubaY1rXZO8NIHVDZjs= go.opentelemetry.io/contrib/propagators/ot v1.29.0 h1:CaJU78FvXrA6ajjp1dOdcABBEjh529+hl396RTqc2LQ= go.opentelemetry.io/contrib/propagators/ot v1.29.0/go.mod h1:Sc0omwLb4eptUhwOAfYXfmPmErHPu2HV6vkeDge/3sY= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/bridge/opentracing v1.33.0 h1:eH88qvKdY7ns7Xu6WlJBQNOzZ3MVvBR6tEl2euaYS9w= go.opentelemetry.io/otel/bridge/opentracing v1.33.0/go.mod h1:FNai/nhRSn/kHyv+V1zaf/30BU8hO/DXo0MvV0PaUS8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= -go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= -go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= -go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= -go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= -go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= -go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -1779,8 +1823,8 @@ go.uber.org/multierr v1.11.0 
h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org/intern v0.0.0-20230525184215-6c62f75575cb h1:ae7kzL5Cfdmcecbh22ll7lYP3iuUdnfnhiPcSaDgH/8= go4.org/intern v0.0.0-20230525184215-6c62f75575cb/go.mod h1:Ycrt6raEcnF5FTsLiLKkhBTO6DPX3RCUCUVnks3gFJU= go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 h1:WJhcL4p+YeDxmZWg141nRm7XC8IDmhz7lk5GpadO1Sg= @@ -1809,8 +1853,8 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1946,8 +1990,8 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1979,8 +2023,8 @@ golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= -golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod 
h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2108,8 +2152,8 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2126,8 +2170,8 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2155,8 +2199,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2225,8 +2269,8 @@ golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= 
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2309,8 +2353,8 @@ google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZ google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ= -google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ= +google.golang.org/api v0.218.0 h1:x6JCjEWeZ9PFCRe9z0FBrNwj7pB7DOAqT35N+IPnAUA= +google.golang.org/api v0.218.0/go.mod h1:5VGHBAkxrA/8EFjLVEYmMUJ8/8+gWWQ3s4cFH0FxG2M= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -2474,8 +2518,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go. google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o= -google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -2489,8 +2533,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2515,8 +2559,8 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 094d8108a4b..d7b9908481b 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -233,6 +233,8 @@ func NewQuerierHandler( false, nil, false, + false, + false, ) // Let's clear all codecs to create the instrumented ones api.ClearCodecs() diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go index 527383360c9..5ff287046bf 100644 --- a/pkg/configs/userconfig/config.go +++ b/pkg/configs/userconfig/config.go @@ -260,7 +260,7 @@ func (c RulesConfig) parseV2Formatted() (map[string]rulefmt.RuleGroups, error) { ruleMap := map[string]rulefmt.RuleGroups{} for fn, content := range c.Files { - rgs, errs := rulefmt.Parse([]byte(content)) + rgs, errs := rulefmt.Parse([]byte(content), false) for _, err := range errs { // return just the first error, if any return nil, err } @@ -287,7 +287,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { groups := map[string][]rules.Rule{} for fn, content := range c.Files { - rgs, errs := rulefmt.Parse([]byte(content)) + rgs, errs := rulefmt.Parse([]byte(content), false) if len(errs) > 0 { return nil, fmt.Errorf("error parsing %s: %v", fn, errs[0]) } diff --git a/pkg/cortex/configinit/init.go b/pkg/cortex/configinit/init.go new file mode 100644 index 
00000000000..1c16f75eea1 --- /dev/null +++ b/pkg/cortex/configinit/init.go @@ -0,0 +1,7 @@ +package configinit + +import "github.com/prometheus/common/model" + +func init() { + model.NameValidationScheme = model.LegacyValidation +} diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 4623ea2c48a..00b67879f06 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -30,6 +30,7 @@ import ( "github.com/cortexproject/cortex/pkg/configs" configAPI "github.com/cortexproject/cortex/pkg/configs/api" "github.com/cortexproject/cortex/pkg/configs/db" + _ "github.com/cortexproject/cortex/pkg/cortex/configinit" "github.com/cortexproject/cortex/pkg/cortex/storage" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/distributor" diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 6e28ed15c5d..0801bd5138f 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -1488,10 +1488,10 @@ func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetad dedupTracker[*m] = struct{}{} result = append(result, scrape.MetricMetadata{ - Metric: m.MetricFamilyName, - Help: m.Help, - Unit: m.Unit, - Type: cortexpb.MetricMetadataMetricTypeToMetricType(m.GetType()), + MetricFamily: m.MetricFamilyName, + Help: m.Help, + Unit: m.Unit, + Type: cortexpb.MetricMetadataMetricTypeToMetricType(m.GetType()), }) } } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 8adf7f8958e..16177ad62b1 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/status" promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" + _ "github.com/cortexproject/cortex/pkg/cortex/configinit" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/ha" "github.com/cortexproject/cortex/pkg/ingester" diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index ed080307e2f..b5d13a96e4f 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -167,6 +167,8 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable, engine promql.QueryE false, nil, false, + false, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/pkg/querier/metadata_handler.go b/pkg/querier/metadata_handler.go index dc81d8739f0..e185cdc6084 100644 --- a/pkg/querier/metadata_handler.go +++ b/pkg/querier/metadata_handler.go @@ -44,13 +44,13 @@ func MetadataHandler(m MetadataQuerier) http.Handler { // Put all the elements of the pseudo-set into a map of slices for marshalling. metrics := map[string][]metricMetadata{} for _, m := range resp { - ms, ok := metrics[m.Metric] + ms, ok := metrics[m.MetricFamily] if !ok { // Most metrics will only hold 1 copy of the same metadata. 
ms = make([]metricMetadata, 0, 1) - metrics[m.Metric] = ms + metrics[m.MetricFamily] = ms } - metrics[m.Metric] = append(ms, metricMetadata{Type: string(m.Type), Help: m.Help, Unit: m.Unit}) + metrics[m.MetricFamily] = append(ms, metricMetadata{Type: string(m.Type), Help: m.Help, Unit: m.Unit}) } util.WriteJSONResponse(w, metadataResult{Status: statusSuccess, Data: metrics}) diff --git a/pkg/querier/metadata_handler_test.go b/pkg/querier/metadata_handler_test.go index e1021462c98..c47daa1e014 100644 --- a/pkg/querier/metadata_handler_test.go +++ b/pkg/querier/metadata_handler_test.go @@ -18,7 +18,7 @@ func TestMetadataHandler_Success(t *testing.T) { d := &MockDistributor{} d.On("MetricsMetadata", mock.Anything).Return( []scrape.MetricMetadata{ - {Metric: "alertmanager_dispatcher_aggregation_groups", Help: "Number of active aggregation groups", Type: "gauge", Unit: ""}, + {MetricFamily: "alertmanager_dispatcher_aggregation_groups", Help: "Number of active aggregation groups", Type: "gauge", Unit: ""}, }, nil) diff --git a/pkg/querier/stats_renderer_test.go b/pkg/querier/stats_renderer_test.go index 2afb53b16f7..6f197b01657 100644 --- a/pkg/querier/stats_renderer_test.go +++ b/pkg/querier/stats_renderer_test.go @@ -88,6 +88,8 @@ func Test_StatsRenderer(t *testing.T) { false, nil, false, + false, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/pkg/querier/tenantfederation/metadata_merge_querier_test.go b/pkg/querier/tenantfederation/metadata_merge_querier_test.go index 1157218e25b..58e5f955ba5 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier_test.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier_test.go @@ -81,12 +81,12 @@ func Test_mergeMetadataQuerier_MetricsMetadata(t *testing.T) { name: "single tenant", tenantIdToMetadata: map[string][]scrape.MetricMetadata{ "user-1": { - {Metric: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, + {MetricFamily: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, }, }, orgId: "user-1", expectedResults: []scrape.MetricMetadata{ - {Metric: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, + {MetricFamily: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, }, expectedMetrics: expectedSingleTenantsMetadataMetrics, }, @@ -94,18 +94,18 @@ func Test_mergeMetadataQuerier_MetricsMetadata(t *testing.T) { name: "should be merged two tenants results", tenantIdToMetadata: map[string][]scrape.MetricMetadata{ "user-1": { - {Metric: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, + {MetricFamily: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, }, "user-2": { - {Metric: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, - {Metric: "metadata3", Help: "metadata3 help", Type: "gauge", Unit: ""}, + {MetricFamily: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, + {MetricFamily: "metadata3", Help: "metadata3 help", Type: "gauge", Unit: ""}, }, }, orgId: "user-1|user-2", expectedResults: []scrape.MetricMetadata{ - {Metric: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, - {Metric: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, - {Metric: "metadata3", Help: "metadata3 help", Type: "gauge", Unit: ""}, + {MetricFamily: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, + {MetricFamily: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, + {MetricFamily: "metadata3", Help: "metadata3 help", Type: "gauge", Unit: ""}, }, expectedMetrics: 
expectedTwoTenantsMetadataMetrics, }, @@ -113,17 +113,17 @@ func Test_mergeMetadataQuerier_MetricsMetadata(t *testing.T) { name: "should be deduplicated when the same metadata exist", tenantIdToMetadata: map[string][]scrape.MetricMetadata{ "user-1": { - {Metric: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, - {Metric: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, + {MetricFamily: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, + {MetricFamily: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, }, "user-2": { - {Metric: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, + {MetricFamily: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, }, }, orgId: "user-1|user-2", expectedResults: []scrape.MetricMetadata{ - {Metric: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, - {Metric: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, + {MetricFamily: "metadata1", Help: "metadata1 help", Type: "gauge", Unit: ""}, + {MetricFamily: "metadata2", Help: "metadata2 help", Type: "counter", Unit: ""}, }, expectedMetrics: expectedTwoTenantsMetadataMetrics, }, diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 1c3d3ceaa9d..37391e6926d 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -370,6 +370,7 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi DefaultRuleQueryOffset: func() time.Duration { return overrides.RulerQueryOffset(userID) }, + RestoreNewRuleGroups: true, }), nil } } diff --git a/pkg/ruler/rulestore/local/local.go b/pkg/ruler/rulestore/local/local.go index 333687e6e59..30722f0f4f3 100644 --- a/pkg/ruler/rulestore/local/local.go +++ b/pkg/ruler/rulestore/local/local.go @@ -170,7 +170,7 @@ func (l *Client) loadAllRulesGroupsForUser(ctx context.Context, userID string) ( func (l *Client) loadAllRulesGroupsForUserAndNamespace(_ context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) { filename := filepath.Join(l.cfg.Directory, userID, namespace) - rulegroups, allErrors := l.loader.Load(filename) + rulegroups, allErrors := l.loader.Load(filename, false) if len(allErrors) > 0 { return nil, errors.Wrapf(allErrors[0], "error parsing %s", filename) } diff --git a/pkg/util/validation/validate_test.go b/pkg/util/validation/validate_test.go index 80eff186912..6b3c15fca78 100644 --- a/pkg/util/validation/validate_test.go +++ b/pkg/util/validation/validate_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" "github.com/weaveworks/common/httpgrpc" + _ "github.com/cortexproject/cortex/pkg/cortex/configinit" "github.com/cortexproject/cortex/pkg/cortexpb" util_log "github.com/cortexproject/cortex/pkg/util/log" ) diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 39a47c85eb2..466426c0d86 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,19 @@ # Changelog +## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08) + + +### Features + +* **auth:** Add universe domain support to idtoken ([#11059](https://github.com/googleapis/google-cloud-go/issues/11059)) ([72add7e](https://github.com/googleapis/google-cloud-go/commit/72add7e9f8f455af695e8ef79212a4bd3122fb3a)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 
([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) +* **auth:** Fix copy of delegates in impersonate.NewIDTokenCredentials ([#11386](https://github.com/googleapis/google-cloud-go/issues/11386)) ([ff7ef8e](https://github.com/googleapis/google-cloud-go/commit/ff7ef8e7ade7171bce3e4f30ff10a2e9f6c27ca0)), refs [#11379](https://github.com/googleapis/google-cloud-go/issues/11379) +* **auth:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + ## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13) diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go new file mode 100644 index 00000000000..705462c1615 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go @@ -0,0 +1,105 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +var ( + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN" +) + +// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token]. +type IDTokenIAMOptions struct { + // Client is required. + Client *http.Client + // Logger is required. + Logger *slog.Logger + UniverseDomain auth.CredentialsPropertyProvider + ServiceAccountEmail string + GenerateIDTokenRequest +} + +// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC. +type GenerateIDTokenRequest struct { + Audience string `json:"audience"` + IncludeEmail bool `json:"includeEmail"` + // Delegates are the ordered, fully-qualified resource name for service + // accounts in a delegation chain. Each service account must be granted + // roles/iam.serviceAccountTokenCreator on the next service account in the + // chain. The delegates must have the following format: + // projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard + // character is required; replacing it with a project ID is invalid. + // Optional. + Delegates []string `json:"delegates,omitempty"` +} + +// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC. +type GenerateIDTokenResponse struct { + Token string `json:"token"` +} + +// Token call IAM generateIdToken with the configuration provided in [IDTokenIAMOptions]. 
+func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) { + universeDomain, err := o.UniverseDomain.GetProperty(ctx) + if err != nil { + return nil, err + } + endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1) + url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail)) + + bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes)) + resp, body, err := internal.DoRequest(o.Client, req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err) + } + o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var tokenResp GenerateIDTokenResponse + if err := json.Unmarshal(body, &tokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) + } + return &auth.Token{ + Value: tokenResp.Token, + // Generated ID tokens are good for one hour. + Expiry: time.Now().Add(1 * time.Hour), + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 6f4ef43bba3..6a8eab6eb99 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -217,3 +217,9 @@ func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (st } return "", err } + +// FormatIAMServiceAccountResource sets a service account name in an IAM resource +// name. 
+func FormatIAMServiceAccountResource(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index a1ef2923799..d9044f1a94b 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + ## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index f88b277ab63..cf422304e7b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,12 @@ # Release History +## 1.17.0 (2025-01-07) + +### Features Added + +* Added field `OperationLocationResultPath` to `runtime.NewPollerOptions[T]` for LROs that use the `Operation-Location` pattern. +* Support `encoding.TextMarshaler` and `encoding.TextUnmarshaler` interfaces in `arm.ResourceID`. + ## 1.16.0 (2024-10-17) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index 00f2d5a0ab9..d9a4e36dccb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -110,6 +110,21 @@ func (id *ResourceID) String() string { return id.stringValue } +// MarshalText returns a textual representation of the ResourceID +func (id *ResourceID) MarshalText() ([]byte, error) { + return []byte(id.String()), nil +} + +// UnmarshalText decodes the textual representation of a ResourceID +func (id *ResourceID) UnmarshalText(text []byte) error { + newId, err := ParseResourceID(string(text)) + if err != nil { + return err + } + *id = *newId + return nil +} + func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID { id := &ResourceID{} id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index 03699fd7629..f4963318932 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -40,12 +40,13 @@ type Poller[T any] struct { OrigURL string `json:"origURL"` Method string `json:"method"` FinalState pollers.FinalStateVia `json:"finalState"` + ResultPath string `json:"resultPath"` CurState string `json:"state"` } // New creates a new Poller from the provided initial response. // Pass nil for response to create an empty Poller for rehydration. 
-func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia, resultPath string) (*Poller[T], error) { if resp == nil { log.Write(log.EventLRO, "Resuming Operation-Location poller.") return &Poller[T]{pl: pl}, nil @@ -82,6 +83,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi OrigURL: resp.Request.URL.String(), Method: resp.Request.Method, FinalState: finalState, + ResultPath: resultPath, CurState: curState, }, nil } @@ -116,10 +118,6 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { var req *exported.Request var err error - // when the payload is included with the status monitor on - // terminal success it's in the "result" JSON property - payloadPath := "result" - if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { @@ -138,7 +136,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { // if a final GET request has been created, execute it if req != nil { // no JSON path when making a final GET request - payloadPath = "" + p.ResultPath = "" resp, err := p.pl.Do(req) if err != nil { return err @@ -146,5 +144,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), p.ResultPath, out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 9f53770e5b6..44ab00d4008 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.16.0" + Version = "v1.17.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index b960cff0b2d..c66fc0a90a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -32,6 +32,7 @@ type PagingHandler[T any] struct { } // Pager provides operations for iterating over paged responses. +// Methods on this type are not safe for concurrent use. type Pager[T any] struct { current *T handler PagingHandler[T] diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 03f76c9aa8e..4f90e447432 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -50,8 +50,14 @@ const ( // NewPollerOptions contains the optional parameters for NewPoller. type NewPollerOptions[T any] struct { // FinalStateVia contains the final-state-via value for the LRO. + // NOTE: used only for Azure-AsyncOperation and Operation-Location LROs. 
FinalStateVia FinalStateVia + // OperationLocationResultPath contains the JSON path to the result's + // payload when it's included with the terminal success response. + // NOTE: only used for Operation-Location LROs. + OperationLocationResultPath string + // Response contains a preconstructed response type. // The final payload will be unmarshaled into it and returned. Response *T @@ -98,7 +104,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { // op poller must be checked before loc as it can also have a location header - opr, err = op.New[T](pl, resp, options.FinalStateVia) + opr, err = op.New[T](pl, resp, options.FinalStateVia, options.OperationLocationResultPath) } else if loc.Applicable(resp) { opr, err = loc.New[T](pl, resp) } else if body.Applicable(resp) { @@ -172,7 +178,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options } else if loc.CanResume(asJSON) { opr, _ = loc.New[T](pl, nil) } else if op.CanResume(asJSON) { - opr, _ = op.New[T](pl, nil, "") + opr, _ = op.New[T](pl, nil, "", "") } else { return nil, fmt.Errorf("unhandled poller token %s", string(raw)) } @@ -200,6 +206,7 @@ type PollingHandler[T any] interface { } // Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +// Methods on this type are not safe for concurrent use. type Poller[T any] struct { op PollingHandler[T] resp *http.Response diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md index ea267e4f411..567e6975b18 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md @@ -1,5 +1,15 @@ # Breaking Changes +## v1.8.0 + +### New errors from `NewManagedIdentityCredential` in some environments + +`NewManagedIdentityCredential` now returns an error when `ManagedIdentityCredentialOptions.ID` is set in a hosting environment whose managed identity API doesn't support user-assigned identities. `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases. Returning an error instead prevents the credential authenticating an unexpected identity. 
The affected hosting environments are: + * Azure Arc + * Azure ML (when a resource or object ID is specified; client IDs are supported) + * Cloud Shell + * Service Fabric + ## v1.6.0 ### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index e35f5ad935b..1ffc19a548f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,19 @@ # Release History +## 1.8.1 (2025-01-15) + +### Bugs Fixed +* User credential types inconsistently log access token scopes +* `DefaultAzureCredential` skips managed identity in Azure Container Instances +* Credentials having optional tenant IDs such as `AzureCLICredential` and + `InteractiveBrowserCredential` require setting `AdditionallyAllowedTenants` + when used with some clients + +### Other Changes +* `ChainedTokenCredential` and `DefaultAzureCredential` continue to their next + credential after `ManagedIdentityCredential` receives an unexpected response + from IMDS, indicating the response is from something else such as a proxy + ## 1.8.0 (2024-10-08) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index 96f30b25cc3..c99ce5b2b56 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -54,17 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ### DefaultAzureCredential -`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. `DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: - -![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) - -1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. -1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. -1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. -1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. -1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. - -> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. 
+`DefaultAzureCredential` simplifies authentication while developing apps that deploy to Azure by combining credentials used in Azure hosting environments with credentials used in local development. For more information, see [DefaultAzureCredential overview][dac_overview]. ## Managed Identity @@ -128,10 +118,10 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) ### Credential chains -|Credential|Usage -|-|- -|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps -|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|Credential|Usage|Reference +|-|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps|[DefaultAzureCredential overview][dac_overview]| +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials|[ChainedTokenCredential overview][ctc_overview]| ### Authenticating Azure-Hosted Applications @@ -260,4 +250,8 @@ For more information, see the or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +[ctc_overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview +[dac_overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview + ![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index e0bd09c636e..8fc7c64aa34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -22,13 +22,13 @@ Some credential types support opt-in persistent token caching (see [the below ta Persistent caches are encrypted at rest using a mechanism that depends on the operating system: -| Operating system | Encryption facility | -|------------------|---------------------------------------| -| Linux | kernel key retention service (keyctl) | -| macOS | Keychain | -| Windows | Data Protection API (DPAPI) | +| Operating system | Encryption facility | +| ---------------- | ---------------------------------------------- | +| Linux | kernel key retention service (keyctl) | +| macOS | Keychain (requires cgo and native build tools) | +| Windows | Data Protection API (DPAPI) | -Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data. +Persistent caching requires encryption. 
When the required encryption facility is unusable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example].

 ### Credentials supporting token caching

@@ -37,7 +37,7 @@ The following table indicates the state of in-memory and persistent caching in e

 **Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example].

 | Credential                     | In-memory token caching                                              | Persistent token caching |
-|--------------------------------|---------------------------------------------------------------------|--------------------------|
+| ------------------------------ | ------------------------------------------------------------------- | ------------------------ |
 | `AzureCLICredential`           | Not Supported                                                        | Not Supported            |
 | `AzureDeveloperCLICredential`  | Not Supported                                                        | Not Supported            |
 | `AzurePipelinesCredential`     | Supported                                                            | Supported                |
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index c24f67e84a7..9c4b1cd71c8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -8,6 +8,7 @@ This troubleshooting guide covers failure investigation techniques, common error
 - [Permission issues](#permission-issues)
 - [Find relevant information in errors](#find-relevant-information-in-errors)
 - [Enable and configure logging](#enable-and-configure-logging)
+- [Troubleshoot persistent token caching issues](#troubleshoot-persistent-token-caching-issues)
 - [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
 - [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues)
 - [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues)
@@ -236,6 +237,29 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul
 | No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.|
 |401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration.
This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| +## Troubleshoot persistent token caching issues + +### macOS + +[azidentity/cache](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache) encrypts persistent caches with the system Keychain on macOS. You may see build and runtime errors there because calling the Keychain API requires cgo and macOS prohibits Keychain access in some scenarios. + +#### Build errors + +Build errors about undefined `accessor` symbols indicate that cgo wasn't enabled. For example: +``` +$ GOOS=darwin go build +# github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache +../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:19: undefined: accessor.New +../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:38: undefined: accessor.WithAccount +``` + +Try `go build` again with `CGO_ENABLED=1`. You may need to install native build tools. + +#### Runtime errors + +macOS prohibits Keychain access from environments without a GUI such as SSH sessions. If your application calls the persistent cache constructor ([cache.New](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache#New)) from an SSH session on a macOS host, you'll see an error like +`persistent storage isn't available due to error "User interaction is not allowed. (-25308)"`. This doesn't mean authentication is impossible, only that credentials can't persist data and the application must reauthenticate the next time it runs. + ## Get additional help Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index ce55dc658e3..40a94154c67 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -42,6 +42,8 @@ const ( developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" + scopeLogFmt = "%s.GetToken() acquired a token for scope %q" + traceNamespace = "Microsoft.Entra" traceOpGetToken = "GetToken" traceOpAuthenticate = "Authenticate" @@ -103,7 +105,16 @@ func resolveAdditionalTenants(tenants []string) []string { return cp } -// resolveTenant returns the correct tenant for a token request +// resolveTenant returns the correct tenant for a token request, or "" when the calling credential doesn't +// have an explicitly configured tenant and the caller didn't specify a tenant for the token request. +// +// - defaultTenant: tenant set when constructing the credential, if any. "" is valid for credentials +// having an optional or implicit tenant such as dev tool and interactive user credentials. Those +// default to the tool's configured tenant or the user's home tenant, respectively. +// - specified: tenant specified for this token request i.e., TokenRequestOptions.TenantID. May be "". 
+// - credName: name of the calling credential type; for error messages +// - additionalTenants: optional allow list of tenants the credential may acquire tokens from in +// addition to defaultTenant i.e., the credential's AdditionallyAllowedTenants option func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) { if specified == "" || specified == defaultTenant { return defaultTenant, nil @@ -119,6 +130,17 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return specified, nil } } + if len(additionalTenants) == 0 { + switch defaultTenant { + case "", organizationsTenantID: + // The application didn't specify a tenant or allow list when constructing the credential. Allow the + // tenant specified for this token request because we have nothing to compare it to (i.e., it vacuously + // satisfies the credential's configuration); don't know whether the application is multitenant; and + // don't want to return an error in the common case that the specified tenant matches the credential's + // default tenant determined elsewhere e.g., in some dev tool's configuration. + return specified, nil + } + } return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index b9976f5fede..e2f371cfd87 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -30,9 +30,9 @@ type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscrip // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. type AzureCLICredentialOptions struct { - // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition - // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the - // logged in account can access. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go index cbe7c4c2db1..46d0b551922 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -30,9 +30,9 @@ type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) // AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. 
type AzureDeveloperCLICredentialOptions struct { - // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition - // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the - // logged in account can access. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index 2460f66ec1e..82342a02545 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -27,7 +27,10 @@ type ChainedTokenCredentialOptions struct { } // ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, -// it tries all the credentials until one authenticates, after which it always uses that credential. +// it tries all the credentials until one authenticates, after which it always uses that credential. For more information, +// see [ChainedTokenCredential overview]. +// +// [ChainedTokenCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview type ChainedTokenCredential struct { cond *sync.Cond iterating bool @@ -46,6 +49,9 @@ func NewChainedTokenCredential(sources []azcore.TokenCredential, options *Chaine if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil return nil, errors.New("sources cannot contain nil") } + if mc, ok := source.(*ManagedIdentityCredential); ok { + mc.mic.chained = true + } } cp := make([]azcore.TokenCredential, len(sources)) copy(cp, sources) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 62c12b5465f..c3af0cdc2d6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -26,27 +26,16 @@ extends: parameters: CloudConfig: Public: - ServiceConnection: azure-sdk-tests - SubscriptionConfigurationFilePaths: - - eng/common/TestResources/sub-config/AzurePublicMsft.json SubscriptionConfigurations: - - $(sub-config-azure-cloud-test-resources) - $(sub-config-identity-test-resources) EnableRaceDetector: true + Location: westus2 RunLiveTests: true ServiceDirectory: azidentity UsePipelineProxy: false ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: - PreSteps: - - task: AzureCLI@2 - displayName: Set OIDC token - inputs: - addSpnToEnvironment: true - azureSubscription: azure-sdk-tests - inlineScript: Write-Host "##vso[task.setvariable variable=OIDC_TOKEN;]$($env:idToken)" - scriptLocation: inlineScript - scriptType: pscore + PersistOidcToken: true MatrixConfigs: - Name: managed_identity_matrix GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 7059a510c22..92f508094df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -115,7 +115,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque err = newAuthenticationFailedErrorFromMSAL(c.name, err) } } else { - msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) + msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", ")) log.Write(EventAuthentication, msg) } return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 3cfc0f7bf1d..14af271f6a1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -23,15 +23,19 @@ type DefaultAzureCredentialOptions struct { // to credential types that authenticate via external tools such as the Azure CLI. azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add - // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be - // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. + // This value can also be set as a semicolon delimited list of tenants in the environment variable + // AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // TenantID sets the default tenant for authentication via the Azure CLI and workload identity. TenantID string } @@ -39,7 +43,7 @@ type DefaultAzureCredentialOptions struct { // DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by // combining credentials used in Azure hosting environments and credentials used in local development. In // production, it's better to use a specific credential type so authentication is more predictable and easier -// to debug. +// to debug. For more information, see [DefaultAzureCredential overview]. // // DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order, // stopping when one provides a token: @@ -55,6 +59,8 @@ type DefaultAzureCredentialOptions struct { // Consult the documentation for these credential types for more information on how they authenticate. 
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for // every subsequent authentication. +// +// [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview type DefaultAzureCredential struct { chain *ChainedTokenCredential } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index 53c4c728735..53ae9767f42 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -21,8 +21,9 @@ const credNameDeviceCode = "DeviceCodeCredential" type DeviceCodeCredentialOptions struct { azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire - // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 848db16e435..ec89de9b5b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -20,8 +20,9 @@ const credNameBrowser = "InteractiveBrowserCredential" type InteractiveBrowserCredentialOptions struct { azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire - // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index 4c657a92ec5..cc07fd70153 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -65,6 +65,9 @@ type managedIdentityClient struct { id ManagedIDKind msiType msiType probeIMDS bool + // chained indicates whether the client is part of a credential chain. If true, the client will return + // a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response. 
+ chained bool } // arcKeyDirectory returns the directory expected to contain Azure Arc keys @@ -144,7 +147,7 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag if _, ok := os.LookupEnv(identityHeader); ok { if _, ok := os.LookupEnv(identityServerThumbprint); ok { if options.ID != nil { - return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned managed identity at runtime") + return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned identity at runtime. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi") } env = "Service Fabric" c.endpoint = endpoint @@ -215,6 +218,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block if c.probeIMDS { + // send a malformed request (no Metadata header) to IMDS to determine whether the endpoint is available cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) defer cancel() cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) @@ -222,24 +226,14 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi if err != nil { return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err) } - res, err := c.azClient.Pipeline().Do(req) - if err != nil { + if _, err = c.azClient.Pipeline().Do(req); err != nil { msg := err.Error() if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { msg = "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information" } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) } - // because IMDS always responds with JSON, assume a non-JSON response is from something else, such - // as a proxy, and return credentialUnavailableError so DefaultAzureCredential continues iterating - b, err := azruntime.Payload(res) - if err != nil { - return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("failed to read IMDS probe response: %s", err)) - } - if !json.Valid(b) { - return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "unexpected response to IMDS probe") - } - // send normal token requests from now on because IMDS responded + // send normal token requests from now on because something responded c.probeIMDS = false } @@ -254,13 +248,21 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { - return c.createAccessToken(resp) + tk, err := c.createAccessToken(resp) + if err != nil && c.chained && c.msiType == msiTypeIMDS { + // failure to unmarshal a 2xx implies the response is from something other than IMDS such as a proxy listening at + // the same address. 
Return a credentialUnavailableError so credential chains continue to their next credential + err = newCredentialUnavailableError(credNameManagedIdentity, err.Error()) + } + return tk, err } if c.msiType == msiTypeIMDS { switch resp.StatusCode { case http.StatusBadRequest: if id != nil { + // return authenticationFailedError, halting any encompassing credential chain, + // because the explicit user-assigned identity implies the developer expected this to work return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) } msg := "failed to authenticate a system assigned identity" @@ -276,6 +278,13 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) } } + if c.chained { + // the response may be from something other than IMDS, for example a proxy returning + // 404. Return credentialUnavailableError so credential chains continue to their + // next credential, include the response in the error message to help debugging + err = newAuthenticationFailedError(credNameManagedIdentity, "", resp) + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error()) + } } return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp) @@ -290,7 +299,7 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string }{} if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { - return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "Unexpected response content", res) } if value.ExpiresIn != "" { expiresIn, err := json.Number(value.ExpiresIn).Int64() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index 73363e1c9e7..ef5e4d72129 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -154,12 +154,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti if p.opts.DisableAutomaticAuthentication { return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro) } - at, err := p.reqToken(ctx, client, tro) - if err == nil { - msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) - log.Write(EventAuthentication, msg) - } - return at, err + return p.reqToken(ctx, client, tro) } // reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache. 
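The 1.8.1 logging fix routes every credential's scope message through the shared `scopeLogFmt` constant. To actually see those messages, an application registers a listener with the azcore log package; a minimal sketch, assuming only the public `azlog` and `azidentity` APIs shown elsewhere in this patch:

```go
package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Forward authentication events, including the acquired-scope messages, to stdout.
	azlog.SetEvents(azidentity.EventAuthentication)
	azlog.SetListener(func(e azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", e, msg)
	})
	// Construct a credential; its GetToken calls will now log granted scopes.
	if _, err := azidentity.NewDefaultAzureCredential(nil); err != nil {
		fmt.Println(err)
	}
}
```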
@@ -242,6 +237,8 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { if err == nil { + msg := fmt.Sprintf(scopeLogFmt, p.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) p.record, err = newAuthenticationRecord(ar) } else { err = newAuthenticationFailedErrorFromMSAL(p.name, err) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index 1a07fede637..efa8c6d3eb1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -7,6 +7,10 @@ param ( [hashtable] $AdditionalParameters = @{}, [hashtable] $DeploymentOutputs, + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $SubscriptionId, + [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)] [ValidateNotNullOrEmpty()] [string] $TenantId, @@ -15,6 +19,10 @@ param ( [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')] [string] $TestApplicationId, + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $Environment, + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). [Parameter(ValueFromRemainingArguments = $true)] $RemainingArguments @@ -28,8 +36,9 @@ if ($CI) { Write-Host "Skipping post-provisioning script because resources weren't deployed" return } - az login --federated-token $env:OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId - az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] + az cloud set -n $Environment + az login --federated-token $env:ARM_OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId + az account set --subscription $SubscriptionId } Write-Host "Building container" @@ -62,6 +71,9 @@ $aciName = "azidentity-test" az container create -g $rg -n $aciName --image $image ` --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --cpu 1 ` + --memory 1.0 ` + --os-type Linux ` --role "Storage Blob Data Reader" ` --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 4fa22dcc121..88c1078a722 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. 
- version = "v1.8.0"
+ version = "v1.8.1"
)
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
index f86286051de..57d0e2777e1 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -18,6 +18,8 @@ import (
	"encoding/pem"
	"errors"
	"fmt"
+	"os"
+	"strings"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
@@ -315,16 +317,21 @@ func New(authority, clientID string, cred Credential, options ...Option) (Client
	if err != nil {
		return Client{}, err
	}
-
+	autoEnabledRegion := os.Getenv("MSAL_FORCE_REGION")
	opts := clientOptions{
		authority: authority,
		// if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache
		disableInstanceDiscovery: cred.tokenProvider != nil,
		httpClient:               shared.DefaultClient,
+		azureRegion:              autoEnabledRegion,
	}
	for _, o := range options {
		o(&opts)
	}
+	if strings.EqualFold(opts.azureRegion, "DisableMsalForceRegion") {
+		opts.azureRegion = ""
+	}
+
	baseOpts := []base.Option{
		base.WithCacheAccessor(opts.accessor),
		base.WithClientCapabilities(opts.capabilities),
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
index 09a0d92f520..e473d1267da 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -89,8 +89,23 @@ type AuthResult struct {
	ExpiresOn      time.Time
	GrantedScopes  []string
	DeclinedScopes []string
+	Metadata       AuthResultMetadata
}

+// AuthResultMetadata contains metadata for the AuthResult.
+type AuthResultMetadata struct {
+	TokenSource TokenSource
+}
+
+type TokenSource int
+
+// These are all the types of token flows.
+const (
+	SourceUnknown    TokenSource = 0
+	IdentityProvider TokenSource = 1
+	Cache            TokenSource = 2
+)
+
// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
	if err := storageTokenResponse.AccessToken.Validate(); err != nil {
@@ -109,7 +124,17 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
			return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
		}
	}
-	return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
+	return AuthResult{
+		Account:        account,
+		IDToken:        idToken,
+		AccessToken:    accessToken,
+		ExpiresOn:      storageTokenResponse.AccessToken.ExpiresOn.T,
+		GrantedScopes:  grantedScopes,
+		DeclinedScopes: nil,
+		Metadata: AuthResultMetadata{
+			TokenSource: Cache,
+		},
+	}, nil
}

// NewAuthResult creates an AuthResult.
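The new `Metadata.TokenSource` field lets callers tell a cached token apart from one freshly issued by the identity provider. A hypothetical helper (not part of the patch), written as if inside the `base` package where `AuthResult` and the `TokenSource` constants are declared:

```go
// describeTokenSource is an illustrative sketch: branch on the new
// Metadata field to report where a token came from.
func describeTokenSource(r AuthResult) string {
	switch r.Metadata.TokenSource {
	case Cache:
		return "token served from the cache"
	case IdentityProvider:
		return "token freshly acquired from the identity provider"
	default:
		return "token source unknown"
	}
}
```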
@@ -123,6 +148,9 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco AccessToken: tokenResponse.AccessToken, ExpiresOn: tokenResponse.ExpiresOn.T, GrantedScopes: tokenResponse.GrantedScopes.Slice, + Metadata: AuthResultMetadata{ + TokenSource: IdentityProvider, + }, }, nil } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go index 2238521f5f9..2134e57c9e4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -18,10 +18,6 @@ import ( ) const addField = "AdditionalFields" -const ( - marshalJSON = "MarshalJSON" - unmarshalJSON = "UnmarshalJSON" -) var ( leftBrace = []byte("{")[0] @@ -106,48 +102,38 @@ func delimIs(got json.Token, want rune) bool { // hasMarshalJSON will determine if the value or a pointer to this value has // the MarshalJSON method. func hasMarshalJSON(v reflect.Value) bool { - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Marshaler) - return ok - } - - if v.Kind() == reflect.Ptr { - v = v.Elem() - } else { - if !v.CanAddr() { - return false + ok := false + if _, ok = v.Interface().(json.Marshaler); !ok { + var i any + if v.Kind() == reflect.Ptr { + i = v.Elem().Interface() + } else if v.CanAddr() { + i = v.Addr().Interface() } - v = v.Addr() - } - - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Marshaler) - return ok + _, ok = i.(json.Marshaler) } - return false + return ok } // callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. // This will panic if the method is not defined. 
func callMarshalJSON(v reflect.Value) ([]byte, error) { - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - marsh := v.Interface().(json.Marshaler) + if marsh, ok := v.Interface().(json.Marshaler); ok { return marsh.MarshalJSON() } if v.Kind() == reflect.Ptr { - v = v.Elem() + if marsh, ok := v.Elem().Interface().(json.Marshaler); ok { + return marsh.MarshalJSON() + } } else { if v.CanAddr() { - v = v.Addr() + if marsh, ok := v.Addr().Interface().(json.Marshaler); ok { + return marsh.MarshalJSON() + } } } - if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { - marsh := v.Interface().(json.Marshaler) - return marsh.MarshalJSON() - } - panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) } @@ -162,12 +148,8 @@ func hasUnmarshalJSON(v reflect.Value) bool { v = v.Addr() } - if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Unmarshaler) - return ok - } - - return false + _, ok := v.Interface().(json.Unmarshaler) + return ok } // hasOmitEmpty indicates if the field has instructed us to not output diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index 04236ff3127..fda5d7dd333 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -7,6 +7,7 @@ package local import ( "context" "fmt" + "html" "net" "net/http" "strconv" @@ -141,7 +142,7 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) { headerErr := q.Get("error") if headerErr != "" { - desc := q.Get("error_description") + desc := html.EscapeString(q.Get("error_description")) // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, // change this to s.error() and make s.error() write the failPage instead of an error code. _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index ef8d908a444..e0653134448 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -10,6 +10,8 @@ import ( "io" "time" + "github.com/google/uuid" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" @@ -18,7 +20,6 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" - "github.com/google/uuid" ) // ResolveEndpointer contains the methods for resolving authority endpoints. 
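The `hasMarshalJSON`/`hasUnmarshalJSON` rewrite above replaces `reflect.Value.MethodByName` lookups with plain interface type assertions, trying the value itself, its pointee, and its address. A standalone sketch of the pattern (the nil-pointer guard is an extra safety check added here, not present in the vendored code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"time"
)

// implementsMarshaler reports whether v, its pointee, or its address
// satisfies json.Marshaler, using type assertions instead of MethodByName.
func implementsMarshaler(v reflect.Value) bool {
	if _, ok := v.Interface().(json.Marshaler); ok {
		return true
	}
	if v.Kind() == reflect.Ptr && !v.IsNil() {
		_, ok := v.Elem().Interface().(json.Marshaler)
		return ok
	}
	if v.CanAddr() {
		_, ok := v.Addr().Interface().(json.Marshaler)
		return ok
	}
	return false
}

func main() {
	t := time.Now()
	fmt.Println(implementsMarshaler(reflect.ValueOf(t)))  // true: time.Time has MarshalJSON
	fmt.Println(implementsMarshaler(reflect.ValueOf(&t))) // true: pointer to a marshaler
	fmt.Println(implementsMarshaler(reflect.ValueOf(42))) // false
}
```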
@@ -331,7 +332,7 @@ func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) if err != nil { - return fmt.Errorf("unable to resolve an endpoint: %s", err) + return fmt.Errorf("unable to resolve an endpoint: %w", err) } authParams.Endpoints = endpoints return nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index 9d60734f88e..c3c4a96fc30 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -23,7 +23,7 @@ import ( const ( authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" - instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + aadInstanceDiscoveryEndpoint = "https://%v/common/discovery/instance" tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" regionName = "REGION_NAME" defaultAPIVersion = "2021-10-01" @@ -47,13 +47,12 @@ type jsonCaller interface { } var aadTrustedHostList = map[string]bool{ - "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list - "login.chinacloudapi.cn": true, // Microsoft Azure China - "login.microsoftonline.de": true, // Microsoft Azure Blackforest - "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy - "login.microsoftonline.us": true, // Microsoft Azure US Government - "login.microsoftonline.com": true, // Microsoft Azure Worldwide - "login.cloudgovapi.us": true, // Microsoft Azure US Government + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.partner.microsoftonline.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide } // TrustedHost checks if an AAD host is trusted/valid. @@ -137,8 +136,12 @@ const ( const ( AAD = "MSSTS" ADFS = "ADFS" + DSTS = "DSTS" ) +// DSTSTenant is referenced throughout multiple files, let us use a const in case we ever need to change it. +const DSTSTenant = "7a433bfc-2514-4697-b467-e0933190487f" + // AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. type AuthenticationScheme interface { // Extra parameters that are added to the request to the /token endpoint. 
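The `resolveEndpoint` change from `%s` to `%w` above is small but meaningful: wrapping preserves the error chain for `errors.Is`/`errors.As`, while `%s` flattens it to text. A minimal self-contained demonstration:

```go
package main

import (
	"errors"
	"fmt"
)

var errUpstream = errors.New("upstream failure")

func main() {
	wrapped := fmt.Errorf("unable to resolve an endpoint: %w", errUpstream)
	flattened := fmt.Errorf("unable to resolve an endpoint: %s", errUpstream)

	fmt.Println(errors.Is(wrapped, errUpstream))   // true: chain preserved
	fmt.Println(errors.Is(flattened, errUpstream)) // false: only the message text survives
}
```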
@@ -236,23 +239,26 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams { // - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint // - the resulting authority URL is invalid func (p AuthParams) WithTenant(ID string) (AuthParams, error) { - switch ID { - case "", p.AuthorityInfo.Tenant: - // keep the default tenant because the caller didn't override it + if ID == "" || ID == p.AuthorityInfo.Tenant { return p, nil - case "common", "consumers", "organizations": - if p.AuthorityInfo.AuthorityType == AAD { + } + + var authority string + switch p.AuthorityInfo.AuthorityType { + case AAD: + if ID == "common" || ID == "consumers" || ID == "organizations" { return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID) } - // else we'll return a better error below - } - if p.AuthorityInfo.AuthorityType != AAD { - return p, errors.New("the authority doesn't support tenants") - } - if p.AuthorityInfo.Tenant == "consumers" { - return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + if p.AuthorityInfo.Tenant == "consumers" { + return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + } + authority = "https://" + path.Join(p.AuthorityInfo.Host, ID) + case ADFS: + return p, errors.New("ADFS authority doesn't support tenants") + case DSTS: + return p, errors.New("dSTS authority doesn't support tenants") } - authority := "https://" + path.Join(p.AuthorityInfo.Host, ID) + info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) if err == nil { info.Region = p.AuthorityInfo.Region @@ -344,44 +350,57 @@ type Info struct { Host string CanonicalAuthorityURI string AuthorityType string - UserRealmURIPrefix string ValidateAuthority bool Tenant string Region string InstanceDiscoveryDisabled bool } -func firstPathSegment(u *url.URL) (string, error) { - pathParts := strings.Split(u.EscapedPath(), "/") - if len(pathParts) >= 2 { - return pathParts[1], nil - } - - return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) -} - // NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. 
func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) {
-	u, err := url.Parse(strings.ToLower(authority))
-	if err != nil || u.Scheme != "https" {
-		return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
+
+	canonicalAuthority := authority
+
+	// suffix authority with / if it doesn't have one
+	if !strings.HasSuffix(canonicalAuthority, "/") {
+		canonicalAuthority += "/"
	}

-	tenant, err := firstPathSegment(u)
+	u, err := url.Parse(strings.ToLower(canonicalAuthority))
+
	if err != nil {
-		return Info{}, err
+		return Info{}, fmt.Errorf("couldn't parse authority url: %w", err)
+	}
+	if u.Scheme != "https" {
+		return Info{}, errors.New("authority url scheme must be https")
+	}
+
+	pathParts := strings.Split(u.EscapedPath(), "/")
+	if len(pathParts) < 3 {
+		return Info{}, errors.New(`authority must be a URL such as "https://login.microsoftonline.com/"`)
	}
+
	authorityType := AAD
-	if tenant == "adfs" {
+	tenant := pathParts[1]
+	switch tenant {
+	case "adfs":
		authorityType = ADFS
+	case "dstsv2":
+		if len(pathParts) != 4 {
+			return Info{}, fmt.Errorf("dSTS authority must be an https URL such as https:///dstsv2/%s", DSTSTenant)
+		}
+		if pathParts[2] != DSTSTenant {
+			return Info{}, fmt.Errorf("dSTS authority only accepts a single tenant %q", DSTSTenant)
+		}
+		authorityType = DSTS
+		tenant = DSTSTenant
	}

	// u.Host includes the port, if any, which is required for private cloud deployments
	return Info{
		Host:                  u.Host,
-		CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant),
+		CanonicalAuthorityURI: canonicalAuthority,
		AuthorityType:         authorityType,
-		UserRealmURIPrefix:    fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()),
		ValidateAuthority:     validateAuthority,
		Tenant:                tenant,
		InstanceDiscoveryDisabled: instanceDiscoveryDisabled,
@@ -525,7 +544,7 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I
		discoveryHost = authorityInfo.Host
	}

-	endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost)
+	endpoint := fmt.Sprintf(aadInstanceDiscoveryEndpoint, discoveryHost)
	err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp)
	}
	return resp, err
@@ -543,17 +562,19 @@ func detectRegion(ctx context.Context) string {
	client := http.Client{
		Timeout: time.Duration(2 * time.Second),
	}
-	req, _ := http.NewRequest("GET", imdsEndpoint, nil)
+	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, imdsEndpoint, nil)
	req.Header.Set("Metadata", "true")
	resp, err := client.Do(req)
+	if err == nil {
+		defer resp.Body.Close()
+	}
	// If the request times out or there is an error, it is retried once
-	if err != nil || resp.StatusCode != 200 {
+	if err != nil || resp.StatusCode != http.StatusOK {
		resp, err = client.Do(req)
-		if err != nil || resp.StatusCode != 200 {
+		if err != nil || resp.StatusCode != http.StatusOK {
			return ""
		}
	}
-	defer resp.Body.Close()
	response, err := io.ReadAll(resp.Body)
	if err != nil {
		return ""
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
index 7d9ec7cd374..d62aac74eb9 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
@@ -18,10 +18,11
@@ import (
	"strings"
	"time"

+	"github.com/google/uuid"
+
	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
	customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version"
-	"github.com/google/uuid"
)

// HTTPClient represents an HTTP client.
@@ -70,15 +71,13 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
		unmarshal = customJSON.Unmarshal
	}

-	u, err := url.Parse(endpoint)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil)
	if err != nil {
-		return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+		return fmt.Errorf("could not create request: %w", err)
	}
-	u.RawQuery = qv.Encode()

	addStdHeaders(headers)
-
-	req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
+	req.Header = headers

	if body != nil {
		// Note: In case you're wondering why we are not gzip encoding....
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
index 0ade411797a..4030ec8d8f1 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
@@ -18,9 +18,6 @@ import (
	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
)

-// ADFS is an active directory federation service authority type.
-const ADFS = "ADFS"
-
type cacheEntry struct {
	Endpoints             authority.Endpoints
	ValidForDomainsInList map[string]bool
@@ -51,7 +48,7 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo
		return endpoints, nil
	}

-	endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName)
+	endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo)
	if err != nil {
		return authority.Endpoints{}, err
	}
@@ -83,7 +80,7 @@ func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPr
	defer m.mu.Unlock()

	if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
-		if authorityInfo.AuthorityType == ADFS {
+		if authorityInfo.AuthorityType == authority.ADFS {
			domain, err := adfsDomainFromUpn(userPrincipalName)
			if err == nil {
				if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok {
@@ -102,7 +99,7 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use

	updatedCacheEntry := createcacheEntry(endpoints)

-	if authorityInfo.AuthorityType == ADFS {
+	if authorityInfo.AuthorityType == authority.ADFS {
		// Since we're here, we've made a call to the backend. We want to ensure we're caching
		// the latest values from the server.
if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { @@ -119,9 +116,12 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry } -func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { - if authorityInfo.Tenant == "adfs" { +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info) (string, error) { + if authorityInfo.AuthorityType == authority.ADFS { return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.AuthorityType == authority.DSTS { + return fmt.Sprintf("https://%s/dstsv2/%s/v2.0/.well-known/openid-configuration", authorityInfo.Host, authority.DSTSTenant), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) if err != nil { @@ -134,7 +134,6 @@ func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, aut return "", err } return resp.TenantDiscoveryEndpoint, nil - } return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index f3ce8183dd9..2945185b0be 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -349,7 +349,7 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s if cfg.hasSSOTokenProviderConfiguration() { skippedFiles = 0 for _, f := range files { - section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)) if ok { var ssoSession ssoSession ssoSession.setFromIniSection(section) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index d15e3c84c01..7ab65bae79e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.55.5" +const SDKVersion = "1.55.6" diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index 548f31da2d5..f47c77a2ba0 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -297,6 +297,8 @@ const ( ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 // Internal use only. ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 ValidatePeerCertificateChainReq_VerificationMode = 6 ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. 
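The aws-sdk-go change above drops a needless `fmt.Sprintf` around an already-built string. Besides being redundant, passing a dynamic string as the format corrupts values containing `%` and is flagged by `go vet`. An illustration with a hypothetical session name:

```go
package main

import "fmt"

func main() {
	name := "sso%20session" // hypothetical value containing a '%'

	// Wrong: the concatenated string becomes the format, so "%20s"
	// is parsed as a verb with a missing operand and the output is garbled.
	fmt.Println(fmt.Sprintf("sso-session " + name))

	// Right: plain concatenation preserves the value verbatim.
	fmt.Println("sso-session " + name) // sso-session sso%20session
}
```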
@@ -308,6 +310,7 @@ var ( 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", + 6: "RESERVED_CUSTOM_VERIFICATION_MODE_6", } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ "UNSPECIFIED": 0, @@ -316,6 +319,7 @@ var ( "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, + "RESERVED_CUSTOM_VERIFICATION_MODE_6": 6, } ) @@ -1978,8 +1982,8 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, - 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9d, + 0x06, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, @@ -2013,7 +2017,7 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x69, 0x63, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, @@ -2025,141 +2029,143 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 
0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, - 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, - 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, - 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, - 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, - 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, - 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, - 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x50, 0x72, 
0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, + 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x36, 0x10, 0x06, + 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, + 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, + 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, + 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 
0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, + 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 
0x6e, 0x65, 0x6f, 0x66, + 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, + 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, + 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, - 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, + 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, + 
0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, - 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, - 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, - 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, - 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, - 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, - 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 
0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, - 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, - 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, - 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, - 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, - 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, - 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, + 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, + 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, + 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, + 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, - 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, - 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, - 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, - 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, - 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 
0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, - 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, - 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, - 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, - 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, - 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, + 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, + 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, + 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, + 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, + 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, + 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, + 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, + 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, + 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 
0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index a6402ee48cc..0cc78547e9f 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -64,13 +64,13 @@ type s2av2TransportCreds struct { localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream serverAuthorizationPolicy []byte } // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream stream.GetS2AStream, serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -101,7 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. -func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream stream.GetS2AStream) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -306,8 +306,9 @@ func NewClientTLSConfig( tokenManager tokenmanager.AccessTokenManager, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverName string, - serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) + serverAuthorizationPolicy []byte, + getStream stream.GetS2AStream) (*tls.Config, error) { + s2AStream, err := createStream(ctx, s2av2Address, transportCreds, getStream) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, err @@ -350,7 +351,7 @@ func (x s2AGrpcStream) CloseSend() error { return x.stream.CloseSend() } -func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { +func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream stream.GetS2AStream) (stream.S2AStream, error) { if getS2AStream != nil { return getS2AStream(ctx, s2av2Address) } diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index fa0002e36b7..6ca75f56080 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -75,7 +75,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) } - // Extract TLS configiguration from SessionResp. + // Extract TLS configuration from SessionResp. 
tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() var cert tls.Certificate diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index cc79bd09a67..c52fccddf8c 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -35,6 +35,7 @@ import ( "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2" "github.com/google/s2a-go/retry" + "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/protobuf/proto" @@ -330,6 +331,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: nil, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + getStream: opts.getS2AStream, }, nil } return &s2aTLSClientConfigFactory{ @@ -338,6 +340,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: tokenManager, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + getStream: opts.getS2AStream, }, nil } @@ -347,6 +350,7 @@ type s2aTLSClientConfigFactory struct { tokenManager tokenmanager.AccessTokenManager verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode serverAuthorizationPolicy []byte + getStream stream.GetS2AStream } func (f *s2aTLSClientConfigFactory) Build( @@ -355,7 +359,7 @@ func (f *s2aTLSClientConfigFactory) Build( if opts != nil && opts.ServerName != "" { serverName = opts.ServerName } - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy, f.getStream) } func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { @@ -370,6 +374,8 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 case ReservedCustomVerificationMode5: return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 + case ReservedCustomVerificationMode6: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go index 5bbf31bf412..b7a277f9e3f 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -19,7 +19,6 @@ package s2a import ( - "context" "crypto/tls" "errors" "sync" @@ -28,7 +27,7 @@ import ( "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" - s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av1pb "github.com/google/s2a-go/internal/proto/common_go_proto" s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) @@ -36,6 +35,17 @@ import ( type Identity interface { // Name returns the name of the identity. 
Name() string + Attributes() map[string]string +} + +type UnspecifiedID struct { + Attr map[string]string +} + +func (u *UnspecifiedID) Name() string { return "" } + +func (u *UnspecifiedID) Attributes() map[string]string { + return u.Attr } type spiffeID struct { @@ -44,10 +54,10 @@ type spiffeID struct { func (s *spiffeID) Name() string { return s.spiffeID } +func (spiffeID) Attributes() map[string]string { return nil } + // NewSpiffeID creates a SPIFFE ID from id. -func NewSpiffeID(id string) Identity { - return &spiffeID{spiffeID: id} -} +func NewSpiffeID(id string) Identity { return &spiffeID{spiffeID: id} } type hostname struct { hostname string @@ -55,10 +65,10 @@ type hostname struct { func (h *hostname) Name() string { return h.hostname } +func (hostname) Attributes() map[string]string { return nil } + // NewHostname creates a hostname from name. -func NewHostname(name string) Identity { - return &hostname{hostname: name} -} +func NewHostname(name string) Identity { return &hostname{hostname: name} } type uid struct { uid string @@ -66,10 +76,10 @@ type uid struct { func (h *uid) Name() string { return h.uid } +func (uid) Attributes() map[string]string { return nil } + // NewUID creates a UID from name. -func NewUID(name string) Identity { - return &uid{uid: name} -} +func NewUID(name string) Identity { return &uid{uid: name} } // VerificationModeType specifies the mode that S2A must use to verify the peer // certificate chain. @@ -83,6 +93,7 @@ const ( ReservedCustomVerificationMode3 ReservedCustomVerificationMode4 ReservedCustomVerificationMode5 + ReservedCustomVerificationMode6 ) // ClientOptions contains the client-side options used to establish a secure @@ -137,7 +148,7 @@ type ClientOptions struct { FallbackOpts *FallbackOptions // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream // Serialized user specified policy for server authorization. serverAuthorizationPolicy []byte @@ -191,7 +202,7 @@ type ServerOptions struct { VerificationMode VerificationModeType // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream } // DefaultServerOptions returns the default server options. 
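The s2a_options.go hunk above widens the Identity interface with an Attributes() accessor and introduces UnspecifiedID for attribute-only identities; the existing concrete types satisfy the new method with value-receiver no-ops. A condensed sketch of that shape, with lowercase hypothetical names where they differ from the vendored code:

package main

import "fmt"

// Identity mirrors the extended interface: every identity now reports a
// name plus optional attributes.
type Identity interface {
	Name() string
	Attributes() map[string]string
}

type spiffeID struct{ id string }

func (s *spiffeID) Name() string { return s.id }

// Value receiver: the method set of *spiffeID includes this no-op, so the
// pointer type still satisfies Identity.
func (spiffeID) Attributes() map[string]string { return nil }

// unspecifiedID carries only attributes, like the new UnspecifiedID type.
type unspecifiedID struct{ attr map[string]string }

func (u *unspecifiedID) Name() string                  { return "" }
func (u *unspecifiedID) Attributes() map[string]string { return u.attr }

func main() {
	ids := []Identity{
		&spiffeID{id: "spiffe://example.org/workload"},
		&unspecifiedID{attr: map[string]string{"pool": "demo"}},
	}
	for _, id := range ids {
		fmt.Printf("name=%q attrs=%v\n", id.Name(), id.Attributes())
	}
}

This keeps the interface change backward compatible for callers that only ever read Name(), while toProtoIdentity/toV2ProtoIdentity (next hunk) can now forward attributes into the proto messages.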
@@ -202,17 +213,30 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } -func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { +func toProtoIdentity(identity Identity) (*s2av1pb.Identity, error) { if identity == nil { return nil, nil } switch id := identity.(type) { case *spiffeID: - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_SpiffeId{SpiffeId: id.Name()}, + Attributes: id.Attributes(), + }, nil case *hostname: - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_Hostname{Hostname: id.Name()}, + Attributes: id.Attributes(), + }, nil case *uid: - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_Uid{Uid: id.Name()}, + Attributes: id.Attributes(), + }, nil + case *UnspecifiedID: + return &s2av1pb.Identity{ + Attributes: id.Attributes(), + }, nil default: return nil, errors.New("unrecognized identity type") } @@ -224,11 +248,24 @@ func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { } switch id := identity.(type) { case *spiffeID: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}, + Attributes: id.Attributes(), + }, nil case *hostname: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}, + Attributes: id.Attributes(), + }, nil case *uid: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}, + Attributes: id.Attributes(), + }, nil + case *UnspecifiedID: + return &s2apb.Identity{ + Attributes: id.Attributes(), + }, nil default: return nil, errors.New("unrecognized identity type") } diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go index 584bf32b1c7..ae2d5eb4c16 100644 --- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go +++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go @@ -20,6 +20,8 @@ package stream import ( + "context" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -32,3 +34,6 @@ type S2AStream interface { // Closes the channel to the S2A server. CloseSend() error } + +// GetS2AStream type is for generating an S2AStream interface for talking to the S2A server. 
+type GetS2AStream func(ctx context.Context, s2av2Address string, opts ...string) (S2AStream, error) diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 29a5900c7da..a8c082dd61e 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.14.0" + "v2": "2.14.1" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 9fb9035908d..17cced15eca 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,17 @@ # Changelog +## [2.14.1](https://github.com/googleapis/gax-go/compare/v2.14.0...v2.14.1) (2024-12-19) + + +### Bug Fixes + +* update golang.org/x/net to v0.33.0 ([#391](https://github.com/googleapis/gax-go/issues/391)) ([547a5b4](https://github.com/googleapis/gax-go/commit/547a5b43aa6f376f71242da9f18e65fbdfb342f6)) + + +### Documentation + +* fix godoc to refer to the proper envvar ([#387](https://github.com/googleapis/gax-go/issues/387)) ([dc6baf7](https://github.com/googleapis/gax-go/commit/dc6baf75c1a737233739630b5af6c9759f08abcd)) + ## [2.14.0](https://github.com/googleapis/gax-go/compare/v2.13.0...v2.14.0) (2024-11-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 88288934549..2b284a24a48 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.14.0" +const Version = "2.14.1" diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go index 91b648a6a4c..e47ab32acc2 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go @@ -44,7 +44,7 @@ import ( // New returns a new [slog.Logger] default logger, or the provided logger if // non-nil. The returned logger will be a no-op logger unless the environment -// variable GOOGLE_SDK_DEBUG_LOGGING is set. +// variable GOOGLE_SDK_GO_LOGGING_LEVEL is set. 
func New(l *slog.Logger) *slog.Logger { if l != nil { return l diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index 93fb09922fb..0a1ca7e06fe 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -141,7 +141,7 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } // Check if oneof already set - if of := fieldDescriptor.ContainingOneof(); of != nil { + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { if f := msgValue.WhichOneof(of); f != nil { return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) } diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 8d5a2a47893..9831c37baf5 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -85,6 +85,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/wintbiit/NineDNS * https://linuxcontainers.org/incus/ * https://ifconfig.es +* https://github.com/zmap/zdns Send pull request if you want to be listed here. diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index 1be87eae634..ffdafcebda9 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -250,14 +250,6 @@ func (d *DS) ToCDS() *CDS { // zero, it is used as-is, otherwise the TTL of the RRset is used as the // OrigTTL. func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { - if k == nil { - return ErrPrivKey - } - // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - h0 := rrset[0].Header() rr.Hdr.Rrtype = TypeRRSIG rr.Hdr.Name = h0.Name @@ -272,6 +264,18 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { rr.Labels-- // wildcard, remove from label count } + return rr.signAsIs(k, rrset) +} + +func (rr *RRSIG) signAsIs(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + sigwire := new(rrsigWireFmt) sigwire.TypeCovered = rr.TypeCovered sigwire.Algorithm = rr.Algorithm @@ -370,9 +374,12 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { if rr.Algorithm != k.Algorithm { return ErrKey } - if !strings.EqualFold(rr.SignerName, k.Hdr.Name) { + + signerName := CanonicalName(rr.SignerName) + if !equal(signerName, k.Hdr.Name) { return ErrKey } + if k.Protocol != 3 { return ErrKey } @@ -384,9 +391,18 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { } // IsRRset checked that we have at least one RR and that the RRs in - // the set have consistent type, class, and name. Also check that type and - // class matches the RRSIG record. - if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered { + // the set have consistent type, class, and name. Also check that type, + // class and name matches the RRSIG record. + // Also checks RFC 4035 5.3.1 the number of labels in the RRset owner + // name MUST be greater than or equal to the value in the RRSIG RR's Labels field. + // RFC 4035 5.3.1 Signer's Name MUST be the name of the zone that [contains the RRset]. 
+ // Since we don't have SOA info, checking suffix may be the best we can do...? + if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || + h0.Rrtype != rr.TypeCovered || + uint8(CountLabel(h0.Name)) < rr.Labels || + !equal(h0.Name, rr.Hdr.Name) || + !strings.HasSuffix(CanonicalName(h0.Name), signerName) { + return ErrRRset } @@ -400,7 +416,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { sigwire.Expiration = rr.Expiration sigwire.Inception = rr.Inception sigwire.KeyTag = rr.KeyTag - sigwire.SignerName = CanonicalName(rr.SignerName) + sigwire.SignerName = signerName // Create the desired binary blob signeddata := make([]byte, DefaultMsgSize) n, err := packSigWire(sigwire, signeddata) diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index c1bbdaae2e9..0447fd826a5 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -58,7 +58,7 @@ func makeDataOpt(code uint16) EDNS0 { case EDNS0EDE: return new(EDNS0_EDE) case EDNS0ESU: - return &EDNS0_ESU{Code: EDNS0ESU} + return new(EDNS0_ESU) default: e := new(EDNS0_LOCAL) e.Code = code @@ -66,8 +66,7 @@ func makeDataOpt(code uint16) EDNS0 { } } -// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. -// See RFC 6891. +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. See RFC 6891. type OPT struct { Hdr RR_Header Option []EDNS0 `dns:"opt"` @@ -144,8 +143,6 @@ func (*OPT) parse(c *zlexer, origin string) *ParseError { func (rr *OPT) isDuplicate(r2 RR) bool { return false } -// return the old value -> delete SetVersion? - // Version returns the EDNS version used. Only zero is defined. func (rr *OPT) Version() uint8 { return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16) @@ -236,8 +233,8 @@ type EDNS0 interface { // e.Nsid = "AA" // o.Option = append(o.Option, e) type EDNS0_NSID struct { - Code uint16 // Always EDNS0NSID - Nsid string // This string needs to be hex encoded + Code uint16 // always EDNS0NSID + Nsid string // string needs to be hex encoded } func (e *EDNS0_NSID) pack() ([]byte, error) { @@ -275,7 +272,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} // When packing it will apply SourceNetmask. If you need more advanced logic, // patches welcome and good luck. type EDNS0_SUBNET struct { - Code uint16 // Always EDNS0SUBNET + Code uint16 // always EDNS0SUBNET Family uint16 // 1 for IP, 2 for IP6 SourceNetmask uint8 SourceScope uint8 @@ -399,8 +396,8 @@ func (e *EDNS0_SUBNET) copy() EDNS0 { // // There is no guarantee that the Cookie string has a specific length. type EDNS0_COOKIE struct { - Code uint16 // Always EDNS0COOKIE - Cookie string // Hex-encoded cookie data + Code uint16 // always EDNS0COOKIE + Cookie string // hex encoded cookie data } func (e *EDNS0_COOKIE) pack() ([]byte, error) { @@ -430,7 +427,7 @@ func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.C // e.Lease = 120 // in seconds // o.Option = append(o.Option, e) type EDNS0_UL struct { - Code uint16 // Always EDNS0UL + Code uint16 // always EDNS0UL Lease uint32 KeyLease uint32 } @@ -469,7 +466,7 @@ func (e *EDNS0_UL) unpack(b []byte) error { // EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 // Implemented for completeness, as the EDNS0 type code is assigned. 
type EDNS0_LLQ struct { - Code uint16 // Always EDNS0LLQ + Code uint16 // always EDNS0LLQ Version uint16 Opcode uint16 Error uint16 @@ -515,7 +512,7 @@ func (e *EDNS0_LLQ) copy() EDNS0 { // EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. type EDNS0_DAU struct { - Code uint16 // Always EDNS0DAU + Code uint16 // always EDNS0DAU AlgCode []uint8 } @@ -539,7 +536,7 @@ func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} } // EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. type EDNS0_DHU struct { - Code uint16 // Always EDNS0DHU + Code uint16 // always EDNS0DHU AlgCode []uint8 } @@ -563,7 +560,7 @@ func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} } // EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. type EDNS0_N3U struct { - Code uint16 // Always EDNS0N3U + Code uint16 // always EDNS0N3U AlgCode []uint8 } @@ -588,7 +585,7 @@ func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} } // EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314. type EDNS0_EXPIRE struct { - Code uint16 // Always EDNS0EXPIRE + Code uint16 // always EDNS0EXPIRE Expire uint32 Empty bool // Empty is used to signal an empty Expire option in a backwards compatible way, it's not used on the wire. } @@ -668,7 +665,7 @@ func (e *EDNS0_LOCAL) unpack(b []byte) error { // EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep // the TCP connection alive. See RFC 7828. type EDNS0_TCP_KEEPALIVE struct { - Code uint16 // Always EDNSTCPKEEPALIVE + Code uint16 // always EDNSTCPKEEPALIVE // Timeout is an idle timeout value for the TCP connection, specified in // units of 100 milliseconds, encoded in network byte order. If set to 0, @@ -839,13 +836,12 @@ func (e *EDNS0_EDE) unpack(b []byte) error { return nil } -// The EDNS0_ESU option for ENUM Source-URI Extension +// The EDNS0_ESU option for ENUM Source-URI Extension. type EDNS0_ESU struct { - Code uint16 + Code uint16 // always EDNS0ESU Uri string } -// Option implements the EDNS0 interface. func (e *EDNS0_ESU) Option() uint16 { return EDNS0ESU } func (e *EDNS0_ESU) String() string { return e.Uri } func (e *EDNS0_ESU) copy() EDNS0 { return &EDNS0_ESU{e.Code, e.Uri} } diff --git a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/miekg/dns/listen_no_socket_options.go similarity index 61% rename from vendor/github.com/miekg/dns/listen_no_reuseport.go rename to vendor/github.com/miekg/dns/listen_no_socket_options.go index 8cebb2f171c..9e4010bdcc6 100644 --- a/vendor/github.com/miekg/dns/listen_no_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_no_socket_options.go @@ -3,9 +3,15 @@ package dns -import "net" +import ( + "fmt" + "net" +) -const supportsReusePort = false +const ( + supportsReusePort = false + supportsReuseAddr = false +) func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { if reuseport || reuseaddr { @@ -15,8 +21,6 @@ func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, e return net.Listen(network, addr) } -const supportsReuseAddr = false - func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) { if reuseport || reuseaddr { // TODO(tmthrgd): return an error? 
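The renamed listen_*_socket_options.go files split SO_REUSEADDR/SO_REUSEPORT handling by platform: the no-support build returns errors, while the supported build installs a net.ListenConfig Control hook that sets the options before bind, chaining both as the reuseaddrandportControl helper in the following hunk does. A Linux-oriented sketch of that hook, assuming the golang.org/x/sys/unix dependency and an appropriate build tag:

package main

import (
	"context"
	"fmt"
	"net"
	"syscall"

	"golang.org/x/sys/unix" // assumed dependency; constants are Linux-specific
)

// reuseControl sets SO_REUSEADDR and then SO_REUSEPORT on the raw socket
// before bind, the same chaining the patch introduces.
func reuseControl(network, address string, c syscall.RawConn) error {
	var opErr error
	if err := c.Control(func(fd uintptr) {
		if opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1); opErr != nil {
			return
		}
		opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
	}); err != nil {
		return err
	}
	return opErr
}

func main() {
	lc := net.ListenConfig{Control: reuseControl}
	ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0")
	if err != nil {
		fmt.Println("listen failed:", err)
		return
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
}

Control runs after socket creation but before bind, which is the only window in which these options take effect; that is why the library exposes them through net.ListenConfig rather than setting them on an already-listening socket.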
@@ -24,3 +28,13 @@ func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, return net.ListenPacket(network, addr) } + +// this is just for test compatibility +func checkReuseport(fd uintptr) (bool, error) { + return false, fmt.Errorf("not supported") +} + +// this is just for test compatibility +func checkReuseaddr(fd uintptr) (bool, error) { + return false, fmt.Errorf("not supported") +} diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/miekg/dns/listen_socket_options.go similarity index 66% rename from vendor/github.com/miekg/dns/listen_reuseport.go rename to vendor/github.com/miekg/dns/listen_socket_options.go index 41326f20b71..35dfc9498ab 100644 --- a/vendor/github.com/miekg/dns/listen_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_socket_options.go @@ -39,10 +39,40 @@ func reuseaddrControl(network, address string, c syscall.RawConn) error { return opErr } +func reuseaddrandportControl(network, address string, c syscall.RawConn) error { + err := reuseaddrControl(network, address, c) + if err != nil { + return err + } + + return reuseportControl(network, address, c) +} + +// this is just for test compatibility +func checkReuseport(fd uintptr) (bool, error) { + v, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT) + if err != nil { + return false, err + } + + return v == 1, nil +} + +// this is just for test compatibility +func checkReuseaddr(fd uintptr) (bool, error) { + v, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR) + if err != nil { + return false, err + } + + return v == 1, nil +} + func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { var lc net.ListenConfig switch { case reuseaddr && reuseport: + lc.Control = reuseaddrandportControl case reuseport: lc.Control = reuseportControl case reuseaddr: @@ -56,6 +86,7 @@ func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, var lc net.ListenConfig switch { case reuseaddr && reuseport: + lc.Control = reuseaddrandportControl case reuseport: lc.Control = reuseportControl case reuseaddr: diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 81580d1e5fd..b04d370f682 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -226,6 +226,7 @@ type Server struct { // If NotifyStartedFunc is set it is called once the server has started listening. NotifyStartedFunc func() // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + // The decorated reader must not mutate the data read from the conn. DecorateReader DecorateReader // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. 
DecorateWriter DecorateWriter diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go index 2c4b103521c..057bb578739 100644 --- a/vendor/github.com/miekg/dns/sig0.go +++ b/vendor/github.com/miekg/dns/sig0.go @@ -7,7 +7,6 @@ import ( "crypto/rsa" "encoding/binary" "math/big" - "strings" "time" ) @@ -151,7 +150,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { } // If key has come from the DNS name compression might // have mangled the case of the name - if !strings.EqualFold(signername, k.Header().Name) { + if !equal(signername, k.Header().Name) { return &Error{err: "signer name doesn't match key name"} } sigend := offset diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 00c8629f278..e290e3dff74 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 62} +var Version = v{1, 1, 63} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/doc.go new file mode 100644 index 00000000000..457bd743449 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// identity types for metrics and sample streams. +// +// Use the `Of*(T) -> I` functions to obtain a unique, comparable (==) and +// hashable (map key) identity value I for T. 
+package identity // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/metric.go new file mode 100644 index 00000000000..ceaea90b8e0 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/metric.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package identity // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" + +import ( + "hash" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +type metric = Metric + +type Metric struct { + scope + + name string + unit string + ty pmetric.MetricType + + monotonic bool + temporality pmetric.AggregationTemporality +} + +func (i Metric) Hash() hash.Hash64 { + sum := i.scope.Hash() + sum.Write([]byte(i.name)) + sum.Write([]byte(i.unit)) + + var mono byte + if i.monotonic { + mono = 1 + } + sum.Write([]byte{byte(i.ty), mono, byte(i.temporality)}) + return sum +} + +func (i Metric) Scope() Scope { + return i.scope +} + +func OfMetric(scope Scope, m pmetric.Metric) Metric { + id := Metric{ + scope: scope, + name: m.Name(), + unit: m.Unit(), + ty: m.Type(), + } + + switch m.Type() { + case pmetric.MetricTypeSum: + sum := m.Sum() + id.monotonic = sum.IsMonotonic() + id.temporality = sum.AggregationTemporality() + case pmetric.MetricTypeExponentialHistogram: + exp := m.ExponentialHistogram() + id.monotonic = true + id.temporality = exp.AggregationTemporality() + case pmetric.MetricTypeHistogram: + hist := m.Histogram() + id.monotonic = true + id.temporality = hist.AggregationTemporality() + } + + return id +} + +func OfResourceMetric(res pcommon.Resource, scope pcommon.InstrumentationScope, metric pmetric.Metric) Metric { + return OfMetric(OfScope(OfResource(res), scope), metric) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/resource.go new file mode 100644 index 00000000000..990fb71e64e --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/resource.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package identity // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" + +import ( + "hash" + "hash/fnv" + + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) + +type resource = Resource + +type Resource struct { + attrs [16]byte +} + +func (r Resource) Hash() hash.Hash64 { + sum := fnv.New64a() + sum.Write(r.attrs[:]) + return sum +} + +func OfResource(r pcommon.Resource) Resource { + return Resource{ + attrs: pdatautil.MapHash(r.Attributes()), + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/scope.go new file mode 100644 index 00000000000..db516bc14c7 --- /dev/null +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/scope.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package identity // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" + +import ( + "hash" + + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) + +type scope = Scope + +type Scope struct { + resource resource + + name string + version string + attrs [16]byte +} + +func (s Scope) Hash() hash.Hash64 { + sum := s.resource.Hash() + sum.Write([]byte(s.name)) + sum.Write([]byte(s.version)) + sum.Write(s.attrs[:]) + return sum +} + +func (s Scope) Resource() Resource { + return s.resource +} + +func OfScope(res Resource, scope pcommon.InstrumentationScope) Scope { + return Scope{ + resource: res, + name: scope.Name(), + version: scope.Version(), + attrs: pdatautil.MapHash(scope.Attributes()), + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/stream.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/stream.go new file mode 100644 index 00000000000..19988f7730d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/stream.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package identity // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" + +import ( + "hash" + + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) + +type Stream struct { + metric + attrs [16]byte +} + +func (i Stream) Hash() hash.Hash64 { + sum := i.metric.Hash() + sum.Write(i.attrs[:]) + return sum +} + +func (i Stream) Metric() Metric { + return i.metric +} + +func OfStream[DataPoint attrPoint](m Metric, dp DataPoint) Stream { + return Stream{metric: m, attrs: pdatautil.MapHash(dp.Attributes())} +} + +type attrPoint interface { + Attributes() pcommon.Map +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/strings.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/strings.go new file mode 100644 index 00000000000..7339f95a578 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/strings.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package identity // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" + +import ( + "fmt" +) + +func (r Resource) String() string { + return fmt.Sprintf("resource/%x", r.Hash().Sum64()) +} + +func (s Scope) String() string { + return fmt.Sprintf("scope/%x", s.Hash().Sum64()) +} + +func (m Metric) String() string { + return fmt.Sprintf("metric/%x", m.Hash().Sum64()) +} + +func (s Stream) String() string { + return fmt.Sprintf("stream/%x", s.Hash().Sum64()) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/priority_queue.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/priority_queue.go new file mode 100644 index 00000000000..f1b01743f95 --- /dev/null +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/priority_queue.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package staleness // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness" + +import ( + "container/heap" + "time" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" +) + +// PriorityQueue represents a way to store entries sorted by their priority. +// Pop() will return the oldest entry of the set. +type PriorityQueue interface { + // Update will add or update an entry, and reshuffle the queue internally as needed to keep it sorted + Update(id identity.Stream, newPrio time.Time) + // Peek will return the entry at the HEAD of the queue *without* removing it from the queue + Peek() (identity.Stream, time.Time) + // Pop will remove the entry at the HEAD of the queue and return it + Pop() (identity.Stream, time.Time) + // Len will return the number of entries in the queue + Len() int +} + +// heapQueue implements heap.Interface. +// We use it as the inner implementation of a heap-based sorted queue +type heapQueue []*queueItem + +type queueItem struct { + key identity.Stream + prio time.Time + index int +} + +func (pq heapQueue) Len() int { return len(pq) } + +func (pq heapQueue) Less(i, j int) bool { + // We want Pop to give us the lowest priority + return pq[i].prio.Before(pq[j].prio) +} + +func (pq heapQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *heapQueue) Push(x any) { + n := len(*pq) + item := x.(*queueItem) + item.index = n + *pq = append(*pq, item) +} + +func (pq *heapQueue) Pop() any { + old := *pq + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +type heapPriorityQueue struct { + inner heapQueue + itemLookup map[identity.Stream]*queueItem +} + +func NewPriorityQueue() PriorityQueue { + pq := &heapPriorityQueue{ + inner: heapQueue{}, + itemLookup: map[identity.Stream]*queueItem{}, + } + heap.Init(&pq.inner) + + return pq +} + +func (pq *heapPriorityQueue) Update(id identity.Stream, newPrio time.Time) { + // Check if the entry already exists in the queue + item, ok := pq.itemLookup[id] + if ok { + // If so, we can update it in place + item.prio = newPrio + heap.Fix(&pq.inner, item.index) + } else { + item = &queueItem{ + key: id, + prio: newPrio, + } + heap.Push(&pq.inner, item) + pq.itemLookup[id] = item + } +} + +func (pq *heapPriorityQueue) Peek() (identity.Stream, time.Time) { + val := pq.inner[0] + return val.key, val.prio +} + +func (pq *heapPriorityQueue) Pop() (identity.Stream, time.Time) { + val := heap.Pop(&pq.inner).(*queueItem) + delete(pq.itemLookup, val.key) + return val.key, val.prio +} + +func (pq *heapPriorityQueue) Len() int { + return pq.inner.Len() +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/staleness.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/staleness.go new file mode 100644 index 00000000000..eb52e686182 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/staleness.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package staleness // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness" + +import ( + "time" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams" +) + +// We override how Now() is returned, so we can have deterministic tests +var NowFunc = time.Now + +var ( + _ streams.Map[any] = (*Staleness[any])(nil) + _ streams.Evictor = (*Staleness[any])(nil) +) + +// Staleness a a wrapper over a map that adds an additional "staleness" value to each entry. Users can +// call ExpireOldEntries() to automatically remove all entries from the map whole staleness value is +// older than the `max` +// +// NOTE: Staleness methods are *not* thread-safe. If the user needs to use Staleness in a multi-threaded +// environment, then it is the user's responsibility to properly serialize calls to Staleness methods +type Staleness[T any] struct { + Max time.Duration + + items streams.Map[T] + pq PriorityQueue +} + +func NewStaleness[T any](max time.Duration, items streams.Map[T]) *Staleness[T] { + return &Staleness[T]{ + Max: max, + + items: items, + pq: NewPriorityQueue(), + } +} + +// Load the value at key. If it does not exist, the boolean will be false and the value returned will be the zero value +func (s *Staleness[T]) Load(id identity.Stream) (T, bool) { + return s.items.Load(id) +} + +// Store the given key value pair in the map, and update the pair's staleness value to "now" +func (s *Staleness[T]) Store(id identity.Stream, v T) error { + s.pq.Update(id, NowFunc()) + return s.items.Store(id, v) +} + +func (s *Staleness[T]) Delete(id identity.Stream) { + s.items.Delete(id) +} + +// Items returns an iterator function that in future go version can be used with range +// See: https://go.dev/wiki/RangefuncExperiment +func (s *Staleness[T]) Items() func(yield func(identity.Stream, T) bool) bool { + return s.items.Items() +} + +// ExpireOldEntries will remove all entries whose staleness value is older than `now() - max` +// For example, if an entry has a staleness value of two hours ago, and max == 1 hour, then the entry would +// be removed. But if an entry had a stalness value of 30 minutes, then it *wouldn't* be removed. 
+func (s *Staleness[T]) ExpireOldEntries() { + now := NowFunc() + for { + if s.Len() == 0 { + return + } + _, ts := s.pq.Peek() + if now.Sub(ts) < s.Max { + break + } + id, _ := s.pq.Pop() + s.items.Delete(id) + } +} + +func (s *Staleness[T]) Len() int { + return s.items.Len() +} + +func (s *Staleness[T]) Next() time.Time { + _, ts := s.pq.Peek() + return ts +} + +func (s *Staleness[T]) Evict() (identity.Stream, bool) { + _, ts := s.pq.Peek() + if NowFunc().Sub(ts) < s.Max { + return identity.Stream{}, false + } + + id, _ := s.pq.Pop() + s.items.Delete(id) + return id, true +} + +func (s *Staleness[T]) Clear() { + s.items.Clear() +} + +type Tracker struct { + pq PriorityQueue +} + +func NewTracker() Tracker { + return Tracker{pq: NewPriorityQueue()} +} + +func (stale Tracker) Refresh(ts time.Time, ids ...identity.Stream) { + for _, id := range ids { + stale.pq.Update(id, ts) + } +} + +func (stale Tracker) Collect(max time.Duration) []identity.Stream { + now := NowFunc() + + var ids []identity.Stream + for stale.pq.Len() > 0 { + _, ts := stale.pq.Peek() + if now.Sub(ts) < max { + break + } + id, _ := stale.pq.Pop() + ids = append(ids, id) + } + + return ids +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go new file mode 100644 index 00000000000..5f0d715b696 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package streams // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams" + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" +) + +// Sequence of streams that can be iterated upon +type Seq[T any] func(yield func(identity.Stream, T) bool) bool + +// Map defines a collection of items tracked by a stream-id and the operations +// on it +type Map[T any] interface { + Load(identity.Stream) (T, bool) + Store(identity.Stream, T) error + Delete(identity.Stream) + Items() func(yield func(identity.Stream, T) bool) bool + Len() int + Clear() +} + +var _ Map[any] = HashMap[any](nil) + +type HashMap[T any] map[identity.Stream]T + +func (m HashMap[T]) Load(id identity.Stream) (T, bool) { + v, ok := (map[identity.Stream]T)(m)[id] + return v, ok +} + +func (m HashMap[T]) Store(id identity.Stream, v T) error { + (map[identity.Stream]T)(m)[id] = v + return nil +} + +func (m HashMap[T]) Delete(id identity.Stream) { + delete((map[identity.Stream]T)(m), id) +} + +func (m HashMap[T]) Items() func(yield func(identity.Stream, T) bool) bool { + return func(yield func(identity.Stream, T) bool) bool { + for id, v := range (map[identity.Stream]T)(m) { + if !yield(id, v) { + break + } + } + return false + } +} + +func (m HashMap[T]) Len() int { + return len((map[identity.Stream]T)(m)) +} + +func (m HashMap[T]) Clear() { + clear(m) +} + +// Evictors remove the "least important" stream based on some strategy such as +// the oldest, least active, etc. 
+// +// Returns whether a stream was evicted and if so the now gone stream id +type Evictor interface { + Evict() (gone identity.Stream, ok bool) +} + +type DataPointSlice[DP DataPoint[DP]] interface { + Len() int + At(i int) DP + AppendEmpty() DP +} + +type DataPoint[Self any] interface { + Timestamp() pcommon.Timestamp + Attributes() pcommon.Map + CopyTo(dest Self) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go new file mode 100644 index 00000000000..172789c607b --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pdatautil // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" + +import ( + "encoding/binary" + "math" + "sort" + "sync" + + "github.com/cespare/xxhash/v2" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +var ( + extraByte = []byte{'\xf3'} + keyPrefix = []byte{'\xf4'} + valEmpty = []byte{'\xf5'} + valBytesPrefix = []byte{'\xf6'} + valStrPrefix = []byte{'\xf7'} + valBoolTrue = []byte{'\xf8'} + valBoolFalse = []byte{'\xf9'} + valIntPrefix = []byte{'\xfa'} + valDoublePrefix = []byte{'\xfb'} + valMapPrefix = []byte{'\xfc'} + valMapSuffix = []byte{'\xfd'} + valSlicePrefix = []byte{'\xfe'} + valSliceSuffix = []byte{'\xff'} + + emptyHash = [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} +) + +// HashOption is a function that sets an option on the hash calculation. +type HashOption func(*hashWriter) + +// WithMap adds a map to the hash calculation. +func WithMap(m pcommon.Map) HashOption { + return func(hw *hashWriter) { + hw.writeMapHash(m) + } +} + +// WithValue adds a value to the hash calculation. +func WithValue(v pcommon.Value) HashOption { + return func(hw *hashWriter) { + hw.writeValueHash(v) + } +} + +// WithString adds a string to the hash calculation. +func WithString(s string) HashOption { + return func(hw *hashWriter) { + hw.byteBuf = append(hw.byteBuf, valStrPrefix...) + hw.byteBuf = append(hw.byteBuf, s...) 
+ }
+}
+
+type hashWriter struct {
+ byteBuf []byte
+ keysBuf []string
+}
+
+func newHashWriter() *hashWriter {
+ return &hashWriter{
+ byteBuf: make([]byte, 0, 512),
+ keysBuf: make([]string, 0, 16),
+ }
+}
+
+var hashWriterPool = &sync.Pool{
+ New: func() any { return newHashWriter() },
+}
+
+// Hash generates a hash for the provided options and returns the computed hash as a [16]byte.
+func Hash(opts ...HashOption) [16]byte {
+ if len(opts) == 0 {
+ return emptyHash
+ }
+
+ hw := hashWriterPool.Get().(*hashWriter)
+ defer hashWriterPool.Put(hw)
+ hw.byteBuf = hw.byteBuf[:0]
+
+ for _, o := range opts {
+ o(hw)
+ }
+
+ return hw.hashSum128()
+}
+
+// Hash64 generates a hash for the provided options and returns the computed hash as a uint64.
+func Hash64(opts ...HashOption) uint64 {
+ hash := Hash(opts...)
+ return xxhash.Sum64(hash[:])
+}
+
+// MapHash returns a hash for the provided map.
+// Maps with the same underlying key/value pairs in different order produce the same deterministic hash value.
+func MapHash(m pcommon.Map) [16]byte {
+ if m.Len() == 0 {
+ return emptyHash
+ }
+
+ hw := hashWriterPool.Get().(*hashWriter)
+ defer hashWriterPool.Put(hw)
+ hw.byteBuf = hw.byteBuf[:0]
+
+ hw.writeMapHash(m)
+
+ return hw.hashSum128()
+}
+
+// ValueHash returns a hash for the provided pcommon.Value.
+func ValueHash(v pcommon.Value) [16]byte {
+ hw := hashWriterPool.Get().(*hashWriter)
+ defer hashWriterPool.Put(hw)
+ hw.byteBuf = hw.byteBuf[:0]
+
+ hw.writeValueHash(v)
+
+ return hw.hashSum128()
+}
+
+func (hw *hashWriter) writeMapHash(m pcommon.Map) {
+ // For each recursive call into this function we want to preserve the previous buffer state
+ // while also adding new keys to the buffer. nextIndex is the index of the first new key
+ // added to the buffer for this call of the function.
+ // This also works for the first non-recursive call of this function because the buffer is always empty
+ // on the first call due to it being cleared of any added keys at the end of the function.
+ nextIndex := len(hw.keysBuf)
+
+ m.Range(func(k string, _ pcommon.Value) bool {
+ hw.keysBuf = append(hw.keysBuf, k)
+ return true
+ })
+
+ // Get only the newly added keys from the buffer by slicing the buffer from nextIndex to the end
+ workingKeySet := hw.keysBuf[nextIndex:]
+
+ sort.Strings(workingKeySet)
+ for _, k := range workingKeySet {
+ v, _ := m.Get(k)
+ hw.byteBuf = append(hw.byteBuf, keyPrefix...)
+ hw.byteBuf = append(hw.byteBuf, k...)
+ hw.writeValueHash(v)
+ }
+
+ // Remove all keys that were added to the buffer during this call of the function
+ hw.keysBuf = hw.keysBuf[:nextIndex]
+}
+
+func (hw *hashWriter) writeValueHash(v pcommon.Value) {
+ switch v.Type() {
+ case pcommon.ValueTypeStr:
+ hw.writeString(v.Str())
+ case pcommon.ValueTypeBool:
+ if v.Bool() {
+ hw.byteBuf = append(hw.byteBuf, valBoolTrue...)
+ } else {
+ hw.byteBuf = append(hw.byteBuf, valBoolFalse...)
+ }
+ case pcommon.ValueTypeInt:
+ hw.byteBuf = append(hw.byteBuf, valIntPrefix...)
+ hw.byteBuf = binary.LittleEndian.AppendUint64(hw.byteBuf, uint64(v.Int()))
+ case pcommon.ValueTypeDouble:
+ hw.byteBuf = append(hw.byteBuf, valDoublePrefix...)
+ hw.byteBuf = binary.LittleEndian.AppendUint64(hw.byteBuf, math.Float64bits(v.Double()))
+ case pcommon.ValueTypeMap:
+ hw.byteBuf = append(hw.byteBuf, valMapPrefix...)
+ hw.writeMapHash(v.Map())
+ hw.byteBuf = append(hw.byteBuf, valMapSuffix...)
+ case pcommon.ValueTypeSlice:
+ sl := v.Slice()
+ hw.byteBuf = append(hw.byteBuf, valSlicePrefix...)
+ for i := 0; i < sl.Len(); i++ { + hw.writeValueHash(sl.At(i)) + } + hw.byteBuf = append(hw.byteBuf, valSliceSuffix...) + case pcommon.ValueTypeBytes: + hw.byteBuf = append(hw.byteBuf, valBytesPrefix...) + hw.byteBuf = append(hw.byteBuf, v.Bytes().AsRaw()...) + case pcommon.ValueTypeEmpty: + hw.byteBuf = append(hw.byteBuf, valEmpty...) + } +} + +func (hw *hashWriter) writeString(s string) { + hw.byteBuf = append(hw.byteBuf, valStrPrefix...) + hw.byteBuf = append(hw.byteBuf, s...) +} + +// hashSum128 returns a [16]byte hash sum. +func (hw *hashWriter) hashSum128() [16]byte { + r := [16]byte{} + res := r[:] + + h := xxhash.Sum64(hw.byteBuf) + res = binary.LittleEndian.AppendUint64(res[:0], h) + + // Append an extra byte to generate another part of the hash sum + hw.byteBuf = append(hw.byteBuf, extraByte...) + h = xxhash.Sum64(hw.byteBuf) + _ = binary.LittleEndian.AppendUint64(res[8:], h) + + return r +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml new file mode 100644 index 00000000000..f726a58cdfc --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml @@ -0,0 +1,3 @@ +status: + codeowners: + active: [dmitryax] \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
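As an aside on the `pkg/pdatautil` package vendored earlier in this patch: its hashes are deterministic across map insertion orders, which is what makes them usable as stream identities. A small sketch (not part of the patch) of the exported API as shown in the vendored `hash.go`:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
)

func main() {
	a := pcommon.NewMap()
	a.PutStr("service.name", "api")
	a.PutInt("port", 8080)

	b := pcommon.NewMap()
	b.PutInt("port", 8080)
	b.PutStr("service.name", "api")

	// Same key/value pairs inserted in a different order hash identically.
	fmt.Println(pdatautil.MapHash(a) == pdatautil.MapHash(b)) // true

	// Hash64 composes options; WithString feeds an extra string into the hash.
	fmt.Println(pdatautil.Hash64(pdatautil.WithMap(a), pdatautil.WithString("extra")))
}
```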
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/Makefile
new file mode 100644
index 00000000000..c1496226e59
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/README.md
new file mode 100644
index 00000000000..04548d5f7f8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/README.md
@@ -0,0 +1,42 @@
+# Delta to cumulative processor
+
+
+| Status | |
+| ------------- |-----------|
+| Stability | [alpha]: metrics |
+| Distributions | [contrib], [k8s] |
+| Warnings | [Statefulness](#warnings) |
+| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Fdeltatocumulative%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Fdeltatocumulative) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Fdeltatocumulative%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Fdeltatocumulative) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@sh0rez](https://www.github.com/sh0rez), [@RichieSams](https://www.github.com/RichieSams) |
+
+[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
+[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
+[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s
+
+
+## Description
+
+The delta to cumulative processor (`deltatocumulativeprocessor`) converts
+metrics from delta temporality to cumulative by accumulating samples in memory.
+
+## Configuration
+
+``` yaml
+processors:
+  deltatocumulative:
+    # how long until a series not receiving new samples is removed
+    [ max_stale: <duration> | default = 5m ]
+
+    # upper limit of streams to track. new streams exceeding this limit
+    # will be dropped
+    [ max_streams: <int> | default = 9223372036854775807 (max int) ]
+
+```
+
+There is no further configuration required. All delta samples are converted to cumulative.
+
+## Troubleshooting
+
+When [Telemetry is
+enabled](https://opentelemetry.io/docs/collector/configuration/#telemetry), this component exports [several metrics](./documentation.md).
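For intuition, the conversion amounts to keeping a running sum per stream: each incoming delta sample is added to the last cumulative value, which is then emitted in its place. A minimal standalone sketch of that idea in Go (hypothetical code, not the component's implementation; real streams are keyed by a full stream identity rather than a plain string):

```go
package main

import "fmt"

// accumulate folds a delta sample into the running cumulative value kept per
// series, mirroring what the processor does for delta sums (sketch only).
func accumulate(state map[string]float64, series string, delta float64) float64 {
	state[series] += delta
	return state[series]
}

func main() {
	state := map[string]float64{}
	for _, delta := range []float64{3, 4, 5} {
		// deltas 3, 4, 5 become cumulative values 3, 7, 12
		fmt.Println(accumulate(state, "requests_total", delta))
	}
}
```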
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go
new file mode 100644
index 00000000000..f5b5c1c59df
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package deltatocumulativeprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"time"
+
+	"go.opentelemetry.io/collector/component"
+
+	telemetry "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry"
+)
+
+var _ component.ConfigValidator = (*Config)(nil)
+
+type Config struct {
+	MaxStale   time.Duration `mapstructure:"max_stale"`
+	MaxStreams int           `mapstructure:"max_streams"`
+}
+
+func (c *Config) Validate() error {
+	if c.MaxStale <= 0 {
+		return fmt.Errorf("max_stale must be a positive duration (got %s)", c.MaxStale)
+	}
+	if c.MaxStreams < 0 {
+		return fmt.Errorf("max_streams must be a non-negative number (got %d)", c.MaxStreams)
+	}
+	return nil
+}
+
+func createDefaultConfig() component.Config {
+	return &Config{
+		MaxStale: 5 * time.Minute,
+
+		// TODO: find good default
+		// https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/31603
+		MaxStreams: math.MaxInt,
+	}
+}
+
+func (c Config) Metrics(tel telemetry.Metrics) {
+	ctx := context.Background()
+	tel.DeltatocumulativeStreamsMaxStale.Record(ctx, int64(c.MaxStale.Seconds()))
+	tel.DeltatocumulativeStreamsLimit.Record(ctx, int64(c.MaxStreams))
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/doc.go
new file mode 100644
index 00000000000..c8f961f6bbe
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate mdatagen metadata.yaml
+
+// Package deltatocumulativeprocessor implements a processor which
+// converts metrics from delta temporality to cumulative.
+package deltatocumulativeprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
new file mode 100644
index 00000000000..f9c560a8703
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
@@ -0,0 +1,79 @@
+[comment]: <> (Code generated by mdatagen. DO NOT EDIT.)
+
+# deltatocumulative
+
+## Internal Telemetry
+
+The following telemetry is emitted by this component.
+
+### otelcol_deltatocumulative.datapoints.dropped
+
+number of datapoints dropped due to given 'reason'
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| {datapoint} | Sum | Int | true |
+
+### otelcol_deltatocumulative.datapoints.linear
+
+total number of datapoints processed. may have 'error' attribute, if processing failed
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| {datapoint} | Sum | Int | true |
+
+### otelcol_deltatocumulative.datapoints.processed
+
+number of datapoints processed
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| {datapoint} | Sum | Int | true |
+
+### otelcol_deltatocumulative.gaps.length
+
+total duration where data was expected but not received
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| s | Sum | Int | true |
+
+### otelcol_deltatocumulative.streams.evicted
+
+number of streams evicted
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| {stream} | Sum | Int | true |
+
+### otelcol_deltatocumulative.streams.limit
+
+upper limit of tracked streams
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {stream} | Gauge | Int |
+
+### otelcol_deltatocumulative.streams.max_stale
+
+duration after which inactive streams are dropped
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| s | Gauge | Int |
+
+### otelcol_deltatocumulative.streams.tracked
+
+number of streams tracked
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| {dps} | Sum | Int | false |
+
+### otelcol_deltatocumulative.streams.tracked.linear
+
+number of streams tracked
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| {dps} | Sum | Int | false |
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go
new file mode 100644
index 00000000000..904ae1ee682
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package deltatocumulativeprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/processor"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry"
+)
+
+func NewFactory() processor.Factory {
+	return processor.NewFactory(
+		metadata.Type,
+		createDefaultConfig,
+		processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability),
+	)
+}
+
+func createMetricsProcessor(_ context.Context, set processor.Settings, cfg component.Config, next consumer.Metrics) (processor.Metrics, error) {
+	pcfg, ok := cfg.(*Config)
+	if !ok {
+		return nil, fmt.Errorf("configuration parsing error")
+	}
+
+	tel, err := telemetry.New(set.TelemetrySettings)
+	if err != nil {
+		return nil, err
+	}
+
+	return newProcessor(pcfg, tel, next), nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
new file mode 100644
index 00000000000..33c2f283c84
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package data // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data"
+
+import (
+	"math"
+
+	"go.opentelemetry.io/collector/pdata/pmetric"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice"
+)
+
+func (dp Number) Add(in Number) Number {
+	switch in.ValueType() {
+	case pmetric.NumberDataPointValueTypeDouble:
+		v := dp.DoubleValue() + in.DoubleValue()
+		dp.SetDoubleValue(v)
+	case pmetric.NumberDataPointValueTypeInt:
+		v := dp.IntValue() + in.IntValue()
+		dp.SetIntValue(v)
+	}
+	dp.SetTimestamp(in.Timestamp())
+	return dp
+}
+
+func (dp Histogram) Add(in Histogram) Histogram {
+	// bounds different: no way to merge, so reset observation to new boundaries
+	if !pslice.Equal(dp.ExplicitBounds(), in.ExplicitBounds()) {
+		in.MoveTo(dp.HistogramDataPoint)
+		return dp
+	}
+
+	// spec requires len(BucketCounts) == len(ExplicitBounds)+1.
+	// given we have limited error handling at this stage (and already verified boundaries are correct),
+	// doing a best-effort add of whatever we have appears reasonable.
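+	// e.g. (hypothetical sketch, not upstream code): with equal bounds,
+	// dp counts [2 3 1] plus in counts [1 1 1] yield [3 4 2]; if the lengths
+	// ever disagree, only the first min(len) buckets are summed below.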
+ n := min(dp.BucketCounts().Len(), in.BucketCounts().Len()) + for i := 0; i < n; i++ { + sum := dp.BucketCounts().At(i) + in.BucketCounts().At(i) + dp.BucketCounts().SetAt(i, sum) + } + + dp.SetTimestamp(in.Timestamp()) + dp.SetCount(dp.Count() + in.Count()) + + if dp.HasSum() && in.HasSum() { + dp.SetSum(dp.Sum() + in.Sum()) + } else { + dp.RemoveSum() + } + + if dp.HasMin() && in.HasMin() { + dp.SetMin(math.Min(dp.Min(), in.Min())) + } else { + dp.RemoveMin() + } + + if dp.HasMax() && in.HasMax() { + dp.SetMax(math.Max(dp.Max(), in.Max())) + } else { + dp.RemoveMax() + } + + return dp +} + +func (dp ExpHistogram) Add(in ExpHistogram) ExpHistogram { + type H = ExpHistogram + + if dp.Scale() != in.Scale() { + hi, lo := expo.HiLo(dp, in, H.Scale) + from, to := expo.Scale(hi.Scale()), expo.Scale(lo.Scale()) + expo.Downscale(hi.Positive(), from, to) + expo.Downscale(hi.Negative(), from, to) + hi.SetScale(lo.Scale()) + } + + if dp.ZeroThreshold() != in.ZeroThreshold() { + hi, lo := expo.HiLo(dp, in, H.ZeroThreshold) + expo.WidenZero(lo.DataPoint, hi.ZeroThreshold()) + } + + expo.Merge(dp.Positive(), in.Positive()) + expo.Merge(dp.Negative(), in.Negative()) + + dp.SetTimestamp(in.Timestamp()) + dp.SetCount(dp.Count() + in.Count()) + dp.SetZeroCount(dp.ZeroCount() + in.ZeroCount()) + + if dp.HasSum() && in.HasSum() { + dp.SetSum(dp.Sum() + in.Sum()) + } else { + dp.RemoveSum() + } + + if dp.HasMin() && in.HasMin() { + dp.SetMin(math.Min(dp.Min(), in.Min())) + } else { + dp.RemoveMin() + } + + if dp.HasMax() && in.HasMax() { + dp.SetMax(math.Max(dp.Max(), in.Max())) + } else { + dp.RemoveMax() + } + + return dp +} + +func (dp Summary) Add(Summary) Summary { + panic("todo") +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go new file mode 100644 index 00000000000..3a36f6d552d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package data // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data" + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" +) + +type Number struct { + pmetric.NumberDataPoint +} + +type Histogram struct { + pmetric.HistogramDataPoint +} + +type ExpHistogram struct { + expo.DataPoint +} + +type Summary struct { + pmetric.SummaryDataPoint +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expo.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expo.go new file mode 100644 index 00000000000..2011e3cd811 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expo.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package expo implements various operations on exponential histograms and their bucket counts +package expo // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + +import "go.opentelemetry.io/collector/pdata/pmetric" + +type ( + DataPoint = pmetric.ExponentialHistogramDataPoint + Buckets = pmetric.ExponentialHistogramDataPointBuckets +) + +// Abs returns a view into the buckets using an absolute scale +func Abs(bs Buckets) Absolute { + return Absolute{buckets: bs} +} + +type buckets = Buckets + +// Absolute addresses bucket counts using an absolute scale, such that it is +// interoperable with [Scale]. +// +// It spans from [[Absolute.Lower]:[Absolute.Upper]] +// +// NOTE: The zero-value is unusable, use [Abs] to construct +type Absolute struct { + buckets +} + +// Abs returns the value at absolute index 'at' +func (a Absolute) Abs(at int) uint64 { + if i, ok := a.idx(at); ok { + return a.BucketCounts().At(i) + } + return 0 +} + +// Upper returns the minimal index outside the set, such that every i < Upper +func (a Absolute) Upper() int { + return a.BucketCounts().Len() + int(a.Offset()) +} + +// Lower returns the minimal index inside the set, such that every i >= Lower +func (a Absolute) Lower() int { + return int(a.Offset()) +} + +func (a Absolute) idx(at int) (int, bool) { + idx := at - a.Lower() + return idx, idx >= 0 && idx < a.BucketCounts().Len() +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go new file mode 100644 index 00000000000..150e29a6581 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Merge combines the counts of buckets a and b into a. +// Both buckets MUST be of same scale +func Merge(arel, brel Buckets) { + if brel.BucketCounts().Len() == 0 { + return + } + if arel.BucketCounts().Len() == 0 { + brel.CopyTo(arel) + return + } + + a, b := Abs(arel), Abs(brel) + + lo := min(a.Lower(), b.Lower()) + up := max(a.Upper(), b.Upper()) + + size := up - lo + + counts := pcommon.NewUInt64Slice() + counts.Append(make([]uint64, size-counts.Len())...) + + for i := 0; i < counts.Len(); i++ { + counts.SetAt(i, a.Abs(lo+i)+b.Abs(lo+i)) + } + + a.SetOffset(int32(lo)) + counts.MoveTo(a.BucketCounts()) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/ord.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/ord.go new file mode 100644 index 00000000000..34d177be179 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/ord.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + +import "cmp" + +// HiLo returns the greater of a and b by comparing the result of applying fn to +// each. 
If equal, returns operands as passed.
+func HiLo[T any, N cmp.Ordered](a, b T, fn func(T) N) (hi, lo T) {
+	an, bn := fn(a), fn(b)
+	if cmp.Less(an, bn) {
+		return b, a
+	}
+	return a, b
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
new file mode 100644
index 00000000000..5201806fb82
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
@@ -0,0 +1,115 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo"
+
+import (
+	"fmt"
+	"math"
+)
+
+type Scale int32
+
+// Idx gives the bucket index v belongs into
+func (scale Scale) Idx(v float64) int {
+	// from: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#all-scales-use-the-logarithm-function
+
+	// Special case for power-of-two values.
+	if frac, exp := math.Frexp(v); frac == 0.5 {
+		return ((exp - 1) << scale) - 1
+	}
+
+	scaleFactor := math.Ldexp(math.Log2E, int(scale))
+	// Note: math.Floor(value) equals math.Ceil(value)-1 when value
+	// is not a power of two, which is checked above.
+	return int(math.Floor(math.Log(v) * scaleFactor))
+}
+
+// Bounds returns the half-open interval (min,max] of the bucket at index.
+// This means a value min < v <= max belongs to this bucket.
+//
+// NOTE: this is different from Go slice intervals, which are [a,b)
+func (scale Scale) Bounds(index int) (min, max float64) {
+	// from: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#all-scales-use-the-logarithm-function
+	lower := func(index int) float64 {
+		inverseFactor := math.Ldexp(math.Ln2, int(-scale))
+		return math.Exp(float64(index) * inverseFactor)
+	}
+
+	return lower(index), lower(index + 1)
+}
+
+// Downscale collapses the buckets of bs until scale 'to' is reached
+func Downscale(bs Buckets, from, to Scale) {
+	switch {
+	case from == to:
+		return
+	case from < to:
+		// because even distribution within the buckets cannot be assumed, it is
+		// not possible to correctly upscale (split) buckets.
+		// any attempt to do so would yield erroneous data.
+		panic(fmt.Sprintf("cannot upscale without introducing error (%d -> %d)", from, to))
+	}
+
+	for at := from; at > to; at-- {
+		Collapse(bs)
+	}
+}
+
+// Collapse merges adjacent buckets and zeros the remaining area:
+//
+//	before: 1 1 1 1 1 1 1 1 1 1 1 1
+//	after:  2 2 2 2 2 2 0 0 0 0 0 0
+//
+// Due to the "perfect subsetting" property of exponential histograms, this
+// gives the same observation as before, but recorded at scale-1. See
+// https://opentelemetry.io/docs/specs/otel/metrics/data-model/#exponential-scale.
+//
+// Because every bucket now spans twice as much range, half of the allocated
+// counts slice is technically no longer required. It is zeroed but left in
+// place to avoid future allocations, because observations may happen in that
+// area at a later time.
+func Collapse(bs Buckets) {
+	counts := bs.BucketCounts()
+	size := counts.Len() / 2
+	if counts.Len()%2 != 0 || bs.Offset()%2 != 0 {
+		size++
+	}
+
+	// merging needs to happen in pairs aligned to i=0. if offset is non-even,
+	// we need to shift the whole merging by one to make the above condition true.
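+	// e.g. (hypothetical sketch, not upstream code): offset=3, counts=[a b c]
+	// is first shifted to offset=2 so pairs align, then halved to offset=1;
+	// the result is counts=[a b+c 0], since a pairs with an unstored bucket.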
+	shift := 0
+	if bs.Offset()%2 != 0 {
+		bs.SetOffset(bs.Offset() - 1)
+		shift--
+	}
+	bs.SetOffset(bs.Offset() / 2)
+
+	for i := 0; i < size; i++ {
+		// size is ~half of len. we add two buckets per iteration.
+		// k jumps in steps of 2, shifted if offset makes this necessary.
+		k := i*2 + shift
+
+		// special case: we just started and had to shift. the left half of the
+		// new bucket is not actually stored, so only use counts[0].
+		if i == 0 && k == -1 {
+			counts.SetAt(i, counts.At(k+1))
+			continue
+		}
+
+		// new[k] = old[k]+old[k+1]
+		counts.SetAt(i, counts.At(k))
+		if k+1 < counts.Len() {
+			counts.SetAt(i, counts.At(k)+counts.At(k+1))
+		}
+	}
+
+	// zero the excess area. it's not needed to represent the observation
+	// anymore, but kept for two reasons:
+	//  1. future observations may need it, no need to re-alloc then if kept
+	//  2. [pcommon.Uint64Slice] can not, in fact, be sliced, so getting rid
+	//     of it would alloc ¯\_(ツ)_/¯
+	for i := size; i < counts.Len(); i++ {
+		counts.SetAt(i, 0)
+	}
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go
new file mode 100644
index 00000000000..2d5401b39f5
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go
@@ -0,0 +1,68 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo"
+
+import (
+	"cmp"
+	"fmt"
+)
+
+// WidenZero widens the zero-bucket to span at least [-width,width], possibly wider
+// if min falls in the middle of a bucket.
+//
+// Both bucket counts MUST be of the same scale.
+func WidenZero(dp DataPoint, width float64) {
+	switch {
+	case width == dp.ZeroThreshold():
+		return
+	case width < dp.ZeroThreshold():
+		panic(fmt.Sprintf("min must be larger than current threshold (%f)", dp.ZeroThreshold()))
+	}
+
+	scale := Scale(dp.Scale())
+	zero := scale.Idx(width) // the largest bucket index inside the zero width
+
+	widen := func(bs Buckets) {
+		abs := Abs(bs)
+		for i := abs.Lower(); i <= zero; i++ {
+			dp.SetZeroCount(dp.ZeroCount() + abs.Abs(i))
+		}
+
+		// right next to the new zero bucket, constrained to slice range
+		lo := clamp(zero+1, abs.Lower(), abs.Upper())
+		abs.Slice(lo, abs.Upper())
+	}
+
+	widen(dp.Positive())
+	widen(dp.Negative())
+
+	_, max := scale.Bounds(zero)
+	dp.SetZeroThreshold(max)
+}
+
+// Slice drops data outside the half-open range from <= i < to from the
+// bucket counts. It behaves the same as Go's [a:b].
+//
+// Limitations:
+//   - due to a limitation of the pcommon package, slicing cannot happen in-place and allocates
+//   - in consequence, data outside the range is garbage collected
+func (a Absolute) Slice(from, to int) {
+	lo, up := a.Lower(), a.Upper()
+	switch {
+	case from > to:
+		panic(fmt.Sprintf("bad bounds: must be from<=to (got %d<=%d)", from, to))
+	case from < lo || to > up:
+		panic(fmt.Sprintf("%d:%d is out of bounds for %d:%d", from, to, lo, up))
+	}
+
+	first := from - lo
+	last := to - lo
+
+	a.BucketCounts().FromRaw(a.BucketCounts().AsRaw()[first:last])
+	a.SetOffset(int32(from))
+}
+
+// clamp constrains v to the range lo..=up
+func clamp[N cmp.Ordered](v, lo, up N) N {
+	return max(lo, min(v, up))
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go
new file mode 100644
index 00000000000..f2a759e9bfe
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package delta // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta"
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/pdata/pcommon"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data"
+)
+
+type ErrOlderStart struct {
+	Start  pcommon.Timestamp
+	Sample pcommon.Timestamp
+}
+
+func (e ErrOlderStart) Error() string {
+	return fmt.Sprintf("dropped sample with start_time=%s, because series only starts at start_time=%s. 
consider checking for multiple processes sending the exact same series", e.Sample, e.Start) +} + +type ErrOutOfOrder struct { + Last pcommon.Timestamp + Sample pcommon.Timestamp +} + +func (e ErrOutOfOrder) Error() string { + return fmt.Sprintf("out of order: dropped sample from time=%s, because series is already at time=%s", e.Sample, e.Last) +} + +type Type interface { + pmetric.NumberDataPoint | pmetric.HistogramDataPoint | pmetric.ExponentialHistogramDataPoint + + StartTimestamp() pcommon.Timestamp + Timestamp() pcommon.Timestamp +} + +// AccumulateInto adds state and dp, storing the result in state +// +// state = state + dp +func AccumulateInto[T Type](state, dp T) error { + switch { + case dp.StartTimestamp() < state.StartTimestamp(): + // belongs to older series + return ErrOlderStart{Start: state.StartTimestamp(), Sample: dp.StartTimestamp()} + case dp.Timestamp() <= state.Timestamp(): + // out of order + return ErrOutOfOrder{Last: state.Timestamp(), Sample: dp.Timestamp()} + } + + switch dp := any(dp).(type) { + case pmetric.NumberDataPoint: + state := any(state).(pmetric.NumberDataPoint) + data.Number{NumberDataPoint: state}.Add(data.Number{NumberDataPoint: dp}) + case pmetric.HistogramDataPoint: + state := any(state).(pmetric.HistogramDataPoint) + data.Histogram{HistogramDataPoint: state}.Add(data.Histogram{HistogramDataPoint: dp}) + case pmetric.ExponentialHistogramDataPoint: + state := any(state).(pmetric.ExponentialHistogramDataPoint) + data.ExpHistogram{DataPoint: state}.Add(data.ExpHistogram{DataPoint: dp}) + } + return nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_status.go new file mode 100644 index 00000000000..25a4f5a32ff --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_status.go @@ -0,0 +1,16 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("deltatocumulative") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor" +) + +const ( + MetricsStability = component.StabilityLevelAlpha +) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go new file mode 100644 index 00000000000..82a4476ba92 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go @@ -0,0 +1,135 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + DeltatocumulativeDatapointsDropped metric.Int64Counter + DeltatocumulativeDatapointsLinear metric.Int64Counter + DeltatocumulativeDatapointsProcessed metric.Int64Counter + DeltatocumulativeGapsLength metric.Int64Counter + DeltatocumulativeStreamsEvicted metric.Int64Counter + DeltatocumulativeStreamsLimit metric.Int64Gauge + DeltatocumulativeStreamsMaxStale metric.Int64Gauge + DeltatocumulativeStreamsTracked metric.Int64UpDownCounter + DeltatocumulativeStreamsTrackedLinear metric.Int64ObservableUpDownCounter + observeDeltatocumulativeStreamsTrackedLinear func(context.Context, metric.Observer) error +} + +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// WithDeltatocumulativeStreamsTrackedLinearCallback sets callback for observable DeltatocumulativeStreamsTrackedLinear metric. +func WithDeltatocumulativeStreamsTrackedLinearCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeDeltatocumulativeStreamsTrackedLinear = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.DeltatocumulativeStreamsTrackedLinear, cb(), opts...) + return nil + } + }) +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} + for _, op := range options { + op.apply(&builder) + } + builder.meter = Meter(settings) + var err, errs error + builder.DeltatocumulativeDatapointsDropped, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_deltatocumulative.datapoints.dropped", + metric.WithDescription("number of datapoints dropped due to given 'reason'"), + metric.WithUnit("{datapoint}"), + ) + errs = errors.Join(errs, err) + builder.DeltatocumulativeDatapointsLinear, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_deltatocumulative.datapoints.linear", + metric.WithDescription("total number of datapoints processed. 
may have 'error' attribute, if processing failed"),
+		metric.WithUnit("{datapoint}"),
+	)
+	errs = errors.Join(errs, err)
+	builder.DeltatocumulativeDatapointsProcessed, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter(
+		"otelcol_deltatocumulative.datapoints.processed",
+		metric.WithDescription("number of datapoints processed"),
+		metric.WithUnit("{datapoint}"),
+	)
+	errs = errors.Join(errs, err)
+	builder.DeltatocumulativeGapsLength, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter(
+		"otelcol_deltatocumulative.gaps.length",
+		metric.WithDescription("total duration where data was expected but not received"),
+		metric.WithUnit("s"),
+	)
+	errs = errors.Join(errs, err)
+	builder.DeltatocumulativeStreamsEvicted, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter(
+		"otelcol_deltatocumulative.streams.evicted",
+		metric.WithDescription("number of streams evicted"),
+		metric.WithUnit("{stream}"),
+	)
+	errs = errors.Join(errs, err)
+	builder.DeltatocumulativeStreamsLimit, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge(
+		"otelcol_deltatocumulative.streams.limit",
+		metric.WithDescription("upper limit of tracked streams"),
+		metric.WithUnit("{stream}"),
+	)
+	errs = errors.Join(errs, err)
+	builder.DeltatocumulativeStreamsMaxStale, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge(
+		"otelcol_deltatocumulative.streams.max_stale",
+		metric.WithDescription("duration after which inactive streams are dropped"),
+		metric.WithUnit("s"),
+	)
+	errs = errors.Join(errs, err)
+	builder.DeltatocumulativeStreamsTracked, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64UpDownCounter(
+		"otelcol_deltatocumulative.streams.tracked",
+		metric.WithDescription("number of streams tracked"),
+		metric.WithUnit("{dps}"),
+	)
+	errs = errors.Join(errs, err)
+	builder.DeltatocumulativeStreamsTrackedLinear, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableUpDownCounter(
+		"otelcol_deltatocumulative.streams.tracked.linear",
+		metric.WithDescription("number of streams tracked"),
+		metric.WithUnit("{dps}"),
+	)
+	errs = errors.Join(errs, err)
+	_, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeDeltatocumulativeStreamsTrackedLinear, builder.DeltatocumulativeStreamsTrackedLinear)
+	errs = errors.Join(errs, err)
+	return &builder, errs
+}
+
+func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter {
+	if cfgLevel <= srvLevel {
+		return meter
+	}
+	return noop.Meter{}
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/data.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/data.go
new file mode 100644
index 00000000000..cb9b2c0ef73
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/data.go
@@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metrics // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" +) + +type Sum Metric + +func (s Sum) Len() int { + return Metric(s).Sum().DataPoints().Len() +} + +func (s Sum) Ident() Ident { + return (*Metric)(&s).Ident() +} + +func (s Sum) SetAggregationTemporality(at pmetric.AggregationTemporality) { + s.Sum().SetAggregationTemporality(at) +} + +type Histogram Metric + +func (s Histogram) Len() int { + return Metric(s).Histogram().DataPoints().Len() +} + +func (s Histogram) Ident() Ident { + return (*Metric)(&s).Ident() +} + +func (s Histogram) SetAggregationTemporality(at pmetric.AggregationTemporality) { + s.Histogram().SetAggregationTemporality(at) +} + +type ExpHistogram Metric + +func (s ExpHistogram) Len() int { + return Metric(s).ExponentialHistogram().DataPoints().Len() +} + +func (s ExpHistogram) Ident() Ident { + return (*Metric)(&s).Ident() +} + +func (s ExpHistogram) SetAggregationTemporality(at pmetric.AggregationTemporality) { + s.ExponentialHistogram().SetAggregationTemporality(at) +} + +type Gauge Metric + +func (s Gauge) Len() int { + return Metric(s).Gauge().DataPoints().Len() +} + +func (s Gauge) Ident() Ident { + return (*Metric)(&s).Ident() +} + +func (s Gauge) SetAggregationTemporality(pmetric.AggregationTemporality) {} + +type Summary Metric + +func (s Summary) Len() int { + return Metric(s).Summary().DataPoints().Len() +} + +func (s Summary) Ident() Ident { + return (*Metric)(&s).Ident() +} + +func (s Summary) SetAggregationTemporality(pmetric.AggregationTemporality) {} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/iter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/iter.go new file mode 100644 index 00000000000..29d94051bfa --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/iter.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func Filter(md pmetric.Metrics, keep func(Metric) bool) { + md.ResourceMetrics().RemoveIf(func(rm pmetric.ResourceMetrics) bool { + rm.ScopeMetrics().RemoveIf(func(sm pmetric.ScopeMetrics) bool { + sm.Metrics().RemoveIf(func(m pmetric.Metric) bool { + return !keep(From(rm.Resource(), sm.Scope(), m)) + }) + return sm.Metrics().Len() == 0 + }) + return rm.ScopeMetrics().Len() == 0 + }) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/metrics.go new file mode 100644 index 00000000000..d65e3b8341a --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/metrics.go @@ -0,0 +1,110 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" + +import ( + 
"go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" +) + +type Ident = identity.Metric + +type Metric struct { + res pcommon.Resource + scope pcommon.InstrumentationScope + pmetric.Metric +} + +func (m *Metric) Ident() Ident { + return identity.OfResourceMetric(m.res, m.scope, m.Metric) +} + +func (m *Metric) Resource() pcommon.Resource { + return m.res +} + +func (m *Metric) Scope() pcommon.InstrumentationScope { + return m.scope +} + +func From(res pcommon.Resource, scope pcommon.InstrumentationScope, metric pmetric.Metric) Metric { + return Metric{res: res, scope: scope, Metric: metric} +} + +func (m Metric) AggregationTemporality() pmetric.AggregationTemporality { + switch m.Type() { + case pmetric.MetricTypeSum: + return m.Sum().AggregationTemporality() + case pmetric.MetricTypeHistogram: + return m.Histogram().AggregationTemporality() + case pmetric.MetricTypeExponentialHistogram: + return m.ExponentialHistogram().AggregationTemporality() + } + + return pmetric.AggregationTemporalityUnspecified +} + +func (m Metric) Typed() Any { + //exhaustive:enforce + switch m.Type() { + case pmetric.MetricTypeSum: + return Sum(m) + case pmetric.MetricTypeGauge: + return Gauge(m) + case pmetric.MetricTypeExponentialHistogram: + return ExpHistogram(m) + case pmetric.MetricTypeHistogram: + return Histogram(m) + case pmetric.MetricTypeSummary: + return Summary(m) + } + panic("unreachable") +} + +var ( + _ Any = Sum{} + _ Any = Gauge{} + _ Any = ExpHistogram{} + _ Any = Histogram{} + _ Any = Summary{} +) + +type Any interface { + Len() int + Ident() identity.Metric + SetAggregationTemporality(pmetric.AggregationTemporality) +} + +func (m Metric) Filter(ok func(id identity.Stream, dp any) bool) { + mid := m.Ident() + switch m.Type() { + case pmetric.MetricTypeSum: + m.Sum().DataPoints().RemoveIf(func(dp pmetric.NumberDataPoint) bool { + id := identity.OfStream(mid, dp) + return !ok(id, dp) + }) + case pmetric.MetricTypeGauge: + m.Gauge().DataPoints().RemoveIf(func(dp pmetric.NumberDataPoint) bool { + id := identity.OfStream(mid, dp) + return !ok(id, dp) + }) + case pmetric.MetricTypeHistogram: + m.Histogram().DataPoints().RemoveIf(func(dp pmetric.HistogramDataPoint) bool { + id := identity.OfStream(mid, dp) + return !ok(id, dp) + }) + case pmetric.MetricTypeExponentialHistogram: + m.ExponentialHistogram().DataPoints().RemoveIf(func(dp pmetric.ExponentialHistogramDataPoint) bool { + id := identity.OfStream(mid, dp) + return !ok(id, dp) + }) + case pmetric.MetricTypeSummary: + m.Summary().DataPoints().RemoveIf(func(dp pmetric.SummaryDataPoint) bool { + id := identity.OfStream(mid, dp) + return !ok(id, dp) + }) + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go new file mode 100644 index 00000000000..6cc97af0413 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pslice // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice" + +type Slice[E any] interface { + 
At(int) E + Len() int +} + +func Equal[E comparable, S Slice[E]](a, b S) bool { + if a.Len() != b.Len() { + return false + } + for i := 0; i < a.Len(); i++ { + if a.At(i) != b.At(i) { + return false + } + } + return true +} + +func All[E any, S Slice[E]](slice S) func(func(E) bool) { + return func(yield func(E) bool) { + for i := 0; i < slice.Len(); i++ { + if !yield(slice.At(i)) { + break + } + } + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/attr.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/attr.go new file mode 100644 index 00000000000..ccfac6553fc --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/attr.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry" + +import "go.opentelemetry.io/otel/attribute" + +type Attributes []attribute.KeyValue + +func (a *Attributes) Set(attr attribute.KeyValue) { + *a = append(*a, attr) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go new file mode 100644 index 00000000000..ab6fde65500 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry" + +import ( + "context" + "errors" + "reflect" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata" +) + +func New(set component.TelemetrySettings) (Metrics, error) { + zero := func() int { return -1 } + m := Metrics{ + tracked: &zero, + } + + trackedCb := metadata.WithDeltatocumulativeStreamsTrackedLinearCallback(func() int64 { + return int64((*m.tracked)()) + }) + + telb, err := metadata.NewTelemetryBuilder(set, trackedCb) + if err != nil { + return Metrics{}, err + } + m.TelemetryBuilder = *telb + + return m, nil +} + +type Metrics struct { + metadata.TelemetryBuilder + + tracked *func() int +} + +func (m Metrics) Datapoints() Counter { + return Counter{Int64Counter: m.DeltatocumulativeDatapointsLinear} +} + +func (m *Metrics) WithTracked(streams func() int) { + *m.tracked = streams +} + +func Error(msg string) attribute.KeyValue { + return attribute.String("error", msg) +} + +func Cause(err error) attribute.KeyValue { + for { + uw := errors.Unwrap(err) + if uw == nil { + break + } + err = uw + } + + return Error(reflect.TypeOf(err).String()) +} + +type Counter struct{ metric.Int64Counter } + +func (c Counter) Inc(ctx context.Context, attrs ...attribute.KeyValue) { + c.Add(ctx, 1, metric.WithAttributes(attrs...)) +} diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
new file mode 100644
index 00000000000..be925197db8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
@@ -0,0 +1,78 @@
+type: deltatocumulative
+
+status:
+  class: processor
+  stability:
+    alpha: [metrics]
+  distributions: [contrib, k8s]
+  warnings: [Statefulness]
+  codeowners:
+    active: [sh0rez, RichieSams]
+
+telemetry:
+  metrics:
+    # streams
+    deltatocumulative.streams.tracked:
+      description: number of streams tracked
+      unit: "{dps}"
+      sum:
+        value_type: int
+        monotonic: false
+      enabled: true
+    deltatocumulative.streams.tracked.linear:
+      description: number of streams tracked
+      unit: "{dps}"
+      sum:
+        value_type: int
+        monotonic: false
+        async: true
+      enabled: true
+    deltatocumulative.streams.limit:
+      description: upper limit of tracked streams
+      unit: "{stream}"
+      gauge:
+        value_type: int
+      enabled: true
+    deltatocumulative.streams.evicted:
+      description: number of streams evicted
+      unit: "{stream}"
+      sum:
+        value_type: int
+        monotonic: true
+      enabled: true
+    deltatocumulative.streams.max_stale:
+      description: duration after which inactive streams are dropped
+      unit: "s"
+      gauge:
+        value_type: int
+      enabled: true
+    # datapoints
+    deltatocumulative.datapoints.processed:
+      description: number of datapoints processed
+      unit: "{datapoint}"
+      sum:
+        value_type: int
+        monotonic: true
+      enabled: true
+    deltatocumulative.datapoints.dropped:
+      description: number of datapoints dropped due to given 'reason'
+      unit: "{datapoint}"
+      sum:
+        value_type: int
+        monotonic: true
+      enabled: true
+
+    deltatocumulative.datapoints.linear:
+      description: total number of datapoints processed. may have 'error' attribute, if processing failed
+      unit: "{datapoint}"
+      sum:
+        value_type: int
+        monotonic: true
+      enabled: true
+    deltatocumulative.gaps.length:
+      description: total duration where data was expected but not received
+      unit: "s"
+      sum:
+        value_type: int
+        monotonic: true
+      enabled: true
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
new file mode 100644
index 00000000000..149431b8970
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
@@ -0,0 +1,214 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package deltatocumulativeprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.opentelemetry.io/collector/processor"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry"
+)
+
+var _ processor.Metrics = (*Processor)(nil)
+
+type Processor struct {
+	next consumer.Metrics
+	cfg  Config
+
+	last state
+	mtx  sync.Mutex
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	stale staleness.Tracker
+	tel   telemetry.Metrics
+}
+
+func newProcessor(cfg *Config, tel telemetry.Metrics, next consumer.Metrics) *Processor {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	proc := Processor{
+		next: next,
+		cfg:  *cfg,
+		last: state{
+			nums: make(map[identity.Stream]pmetric.NumberDataPoint),
+			hist: make(map[identity.Stream]pmetric.HistogramDataPoint),
+			expo: make(map[identity.Stream]pmetric.ExponentialHistogramDataPoint),
+		},
+		ctx:    ctx,
+		cancel: cancel,
+
+		stale: staleness.NewTracker(),
+		tel:   tel,
+	}
+
+	tel.WithTracked(proc.last.Len)
+	cfg.Metrics(tel)
+
+	return &proc
+}
+
+func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	now := time.Now()
+
+	const (
+		keep = true
+		drop = false
+	)
+
+	metrics.Filter(md, func(m metrics.Metric) bool {
+		if m.AggregationTemporality() != pmetric.AggregationTemporalityDelta {
+			return keep
+		}
+
+		// aggregate the datapoints.
+		// using filter here, as the pmetric.*DataPoint are reference types so
+		// we can modify them using their "value".
+		m.Filter(func(id identity.Stream, dp any) bool {
+			// count the processed datapoint.
+			// uses whatever value attrs has at return-time
+			var attrs telemetry.Attributes
+			defer func() { p.tel.Datapoints().Inc(ctx, attrs...) 
}() + + // if stream new and state capacity reached, reject + exist := p.last.Has(id) + if !exist && p.last.Len() >= p.cfg.MaxStreams { + attrs.Set(telemetry.Error("limit")) + return drop + } + + // stream is ok and active, update stale tracker + p.stale.Refresh(now, id) + + // this is the first sample of the stream. there is nothing to + // aggregate with, so clone this value into the state and done + if !exist { + p.last.BeginWith(id, dp) + return keep + } + + // aggregate with state from previous requests. + // delta.AccumulateInto(state, dp) stores result in `state`. + // this is then copied into `dp` (the value passed onto the pipeline) + var err error + switch dp := dp.(type) { + case pmetric.NumberDataPoint: + state := p.last.nums[id] + err = delta.AccumulateInto(state, dp) + state.CopyTo(dp) + case pmetric.HistogramDataPoint: + state := p.last.hist[id] + err = delta.AccumulateInto(state, dp) + state.CopyTo(dp) + case pmetric.ExponentialHistogramDataPoint: + state := p.last.expo[id] + err = delta.AccumulateInto(state, dp) + state.CopyTo(dp) + } + if err != nil { + attrs.Set(telemetry.Cause(err)) + return drop + } + + return keep + }) + + // all remaining datapoints of this metric are now cumulative + m.Typed().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + // if no datapoints remain, drop empty metric + return m.Typed().Len() > 0 + }) + + // no need to continue pipeline if we dropped all metrics + if md.MetricCount() == 0 { + return nil + } + return p.next.ConsumeMetrics(ctx, md) +} + +func (p *Processor) Start(_ context.Context, _ component.Host) error { + if p.cfg.MaxStale != 0 { + // delete stale streams once per minute + go func() { + tick := time.NewTicker(time.Minute) + defer tick.Stop() + for { + select { + case <-p.ctx.Done(): + return + case <-tick.C: + p.mtx.Lock() + stale := p.stale.Collect(p.cfg.MaxStale) + for _, id := range stale { + p.last.Delete(id) + } + p.mtx.Unlock() + } + } + }() + } + + return nil +} + +func (p *Processor) Shutdown(_ context.Context) error { + p.cancel() + return nil +} + +func (p *Processor) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: true} +} + +// state keeps a cumulative value, aggregated over time, per stream +type state struct { + nums map[identity.Stream]pmetric.NumberDataPoint + hist map[identity.Stream]pmetric.HistogramDataPoint + expo map[identity.Stream]pmetric.ExponentialHistogramDataPoint +} + +func (m state) Len() int { + return len(m.nums) + len(m.hist) + len(m.expo) +} + +func (m state) Has(id identity.Stream) bool { + _, nok := m.nums[id] + _, hok := m.hist[id] + _, eok := m.expo[id] + return nok || hok || eok +} + +func (m state) Delete(id identity.Stream) { + delete(m.nums, id) + delete(m.hist, id) + delete(m.expo, id) +} + +func (m state) BeginWith(id identity.Stream, dp any) { + switch dp := dp.(type) { + case pmetric.NumberDataPoint: + m.nums[id] = pmetric.NewNumberDataPoint() + dp.CopyTo(m.nums[id]) + case pmetric.HistogramDataPoint: + m.hist[id] = pmetric.NewHistogramDataPoint() + dp.CopyTo(m.hist[id]) + case pmetric.ExponentialHistogramDataPoint: + m.expo[id] = pmetric.NewExponentialHistogramDataPoint() + dp.CopyTo(m.expo[id]) + } +} diff --git a/vendor/github.com/prometheus/client_golang/api/client.go b/vendor/github.com/prometheus/client_golang/api/client.go index 72a01309c3a..afcf122efc5 100644 --- a/vendor/github.com/prometheus/client_golang/api/client.go +++ b/vendor/github.com/prometheus/client_golang/api/client.go @@ -79,6 +79,10 @@ type Client 
interface { Do(context.Context, *http.Request) (*http.Response, []byte, error) } +type CloseIdler interface { + CloseIdleConnections() +} + // NewClient returns a new Client. // // It is safe to use the returned Client from multiple goroutines. @@ -118,6 +122,10 @@ func (c *httpClient) URL(ep string, args map[string]string) *url.URL { return &u } +func (c *httpClient) CloseIdleConnections() { + c.client.CloseIdleConnections() +} + func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { if ctx != nil { req = req.WithContext(ctx) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go b/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go new file mode 100644 index 00000000000..b65896a3195 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "sync/atomic" + "time" +) + +// atomicUpdateFloat atomically updates the float64 value pointed to by bits +// using the provided updateFunc, with an exponential backoff on contention. +func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) { + const ( + // both numbers are derived from empirical observations + // documented in this PR: https://github.com/prometheus/client_golang/pull/1661 + maxBackoff = 320 * time.Millisecond + initialBackoff = 10 * time.Millisecond + ) + backoff := initialBackoff + + for { + loadedBits := atomic.LoadUint64(bits) + oldFloat := math.Float64frombits(loadedBits) + newFloat := updateFunc(oldFloat) + newBits := math.Float64bits(newFloat) + + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } else { + // Exponential backoff with sleep and cap to avoid infinite wait + time.Sleep(backoff) + backoff *= 2 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 4ce84e7a80e..2996aef6a0a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -134,13 +134,9 @@ func (c *counter) Add(v float64) { return } - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } + atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 { + return oldVal + v + }) } func (c *counter) AddWithExemplar(v float64, e Labels) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 68ffe3c2480..ad347113c04 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -189,12 +189,15 
@@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } - vlStrings := make([]string, 0, len(d.variableLabels.names)) - for _, vl := range d.variableLabels.names { - if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { - vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) - } else { - vlStrings = append(vlStrings, vl) + vlStrings := []string{} + if d.variableLabels != nil { + vlStrings = make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } } } return fmt.Sprintf( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index dd2eac94067..aa18463654f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -120,13 +120,9 @@ func (g *gauge) Dec() { } func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } + atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 { + return oldVal + val + }) } func (g *gauge) Sub(val float64) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 51174641729..6b8684731c9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -288,7 +288,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } func attachOriginalName(desc, origName string) string { - return fmt.Sprintf("%s Sourced from %s", desc, origName) + return fmt.Sprintf("%s Sourced from %s.", desc, origName) } // Describe returns all descriptions of the collector. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 519db348a74..1a279035b30 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "math" "runtime" @@ -28,6 +29,11 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" ) +const ( + nativeHistogramSchemaMaximum = 8 + nativeHistogramSchemaMinimum = -4 +) + // nativeHistogramBounds for the frac of observed values. Only relevant for // schema > 0. The position in the slice is the schema. (0 is never used, just // here for convenience of using the schema directly as the index.) @@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { // used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. 
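The hunk below renames min/max to minBucket/maxBucket, presumably to stop shadowing the min and max builtins introduced in Go 1.21; behavior is unchanged. A minimal sketch of the geometric spacing the function produces, with illustrative numbers:

	// growthFactor = (100/1)^(1/(5-1)) ≈ 3.1623
	buckets := prometheus.ExponentialBucketsRange(1, 100, 5)
	fmt.Println(buckets) // ≈ [1 3.16 10 31.6 100]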
-func ExponentialBucketsRange(min, max float64, count int) []float64 {
+func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
 	if count < 1 {
 		panic("ExponentialBucketsRange count needs a positive count")
 	}
-	if min <= 0 {
+	if minBucket <= 0 {
 		panic("ExponentialBucketsRange min needs to be greater than 0")
 	}
 
@@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 {
 	// max = min*growthFactor^(bucketCount-1)
 	// We know max/min and highest bucket. Solve for growthFactor.
-	growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+	growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))
 
 	// Now that we know growthFactor, solve for each bucket.
 	buckets := make([]float64, count)
 	for i := 1; i <= count; i++ {
-		buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+		buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
 	}
 	return buckets
 }
@@ -858,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error {
 // findBucket returns the index of the bucket for the provided value, or
 // len(h.upperBounds) for the +Inf bucket.
 func (h *histogram) findBucket(v float64) int {
-	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
-	// slightly faster than the binary search. If we really care, we could
-	// switch from one search strategy to the other depending on the number
-	// of buckets.
-	//
-	// Microbenchmarks (BenchmarkHistogramNoLabels):
-	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
-	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
-	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+	n := len(h.upperBounds)
+	if n == 0 {
+		return 0
+	}
+
+	// Early exit: if v is less than or equal to the first upper bound, return 0
+	if v <= h.upperBounds[0] {
+		return 0
+	}
+
+	// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+	if v > h.upperBounds[n-1] {
+		return n
+	}
+
+	// For small arrays, use simple linear search.
+	// The "magic number" 35 is the result of tests on a couple of different (AWS and bare-metal) servers;
+	// see more details here: https://github.com/prometheus/client_golang/pull/1662
+	if n < 35 {
+		for i, bound := range h.upperBounds {
+			if v <= bound {
+				return i
+			}
+		}
+		// If v is greater than all upper bounds, return len(h.upperBounds)
+		return n
+	}
+
+	// For larger arrays, use stdlib's binary search
 	return sort.SearchFloat64s(h.upperBounds, v)
 }
 
@@ -1440,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 {
 	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
 	switch {
 	case floor <= -8:
-		return 8
+		return nativeHistogramSchemaMaximum
 	case floor >= 4:
-		return -4
+		return nativeHistogramSchemaMinimum
 	default:
 		return -int32(floor)
 	}
@@ -1621,13 +1647,9 @@ func waitForCooldown(count uint64, counts *histogramCounts) {
 // atomicAddFloat adds the provided float atomically to another float
 // represented by the bit pattern the bits pointer is pointing to.
 func atomicAddFloat(bits *uint64, v float64) {
-	for {
-		loadedBits := atomic.LoadUint64(bits)
-		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
-		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
-			break
-		}
-	}
+	atomicUpdateFloat(bits, func(oldVal float64) float64 {
+		return oldVal + v
+	})
 }
 
 // atomicDecUint32 atomically decrements the uint32 p points to.
See
@@ -1835,3 +1857,196 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
 		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
 	}
 }
+
+type constNativeHistogram struct {
+	desc *Desc
+	dto.Histogram
+	labelPairs []*dto.LabelPair
+}
+
+func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
+	var bucketPopulationSum int64
+	for _, v := range positiveBuckets {
+		bucketPopulationSum += v
+	}
+	for _, v := range negativeBuckets {
+		bucketPopulationSum += v
+	}
+	bucketPopulationSum += int64(zeroBucket)
+
+	// If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts.
+	// Otherwise, the number of observations must be equal to the sum of all bucket counts.
+
+	if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
+		!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
+		return errors.New("the sum of all bucket populations exceeds the count of observations")
+	}
+	return nil
+}
+
+// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
+// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send to Prometheus in the Collect method.
+//
+// zeroBucket counts all (positive and negative)
+// observations in the zero bucket (with an absolute value less than or equal to
+// the current threshold).
+// positiveBuckets and negativeBuckets are separate maps for negative and positive
+// observations. The map's value is an int64, counting observations in
+// that bucket. The map's key is the
+// index of the bucket according to the used
+// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets.
+// NewConstNativeHistogram returns an error if
+//   - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
+//   - the schema passed is not between -4 and 8
+//   - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
+//
+// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
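A hedged usage sketch for the constructor that follows; the metric name, numbers, and buckets are made up, chosen so the zero bucket (1) plus the positive buckets (2+3) equal the count (6), as validateCount requires:

	desc := prometheus.NewDesc("example_duration_seconds", "Illustrative only.", nil, nil)
	m, err := prometheus.NewConstNativeHistogram(
		desc,
		6,                         // count
		12.5,                      // sum (not NaN, so buckets must sum to count)
		map[int]int64{0: 2, 1: 3}, // positive buckets, keyed by schema index
		nil,                       // negative buckets
		1,                         // zero bucket
		3,                         // schema, must lie in [-4, 8]
		2.938735877055719e-39,     // zero threshold (client_golang's usual default)
		time.Now(),                // created timestamp
	)
	if err != nil {
		panic(err) // invalid desc, labels, schema, or bucket/count mismatch
	}
	_ = m // typically sent on the channel inside a custom Collect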
+func NewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + schema int32, + zeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { + return nil, errors.New("invalid native histogram schema") + } + if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { + return nil, err + } + + NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) + PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) + ret := &constNativeHistogram{ + desc: desc, + Histogram: dto.Histogram{ + CreatedTimestamp: timestamppb.New(createdTimestamp), + Schema: &schema, + ZeroThreshold: &zeroThreshold, + SampleCount: &count, + SampleSum: &sum, + + NegativeSpan: NegativeSpan, + NegativeDelta: NegativeDelta, + + PositiveSpan: PositiveSpan, + PositiveDelta: PositiveDelta, + + ZeroCount: proto.Uint64(zeroBucket), + }, + labelPairs: MakeLabelPairs(desc, labelValues), + } + if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { + ret.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } + return ret, nil +} + +// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where +// NewConstNativeHistogram would have returned an error. +func MustNewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + nativeHistogramSchema int32, + nativeHistogramZeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) Metric { + nativehistogram, err := NewConstNativeHistogram(desc, + count, + sum, + positiveBuckets, + negativeBuckets, + zeroBucket, + nativeHistogramSchema, + nativeHistogramZeroThreshold, + createdTimestamp, + labelValues...) + if err != nil { + panic(err) + } + return nativehistogram +} + +func (h *constNativeHistogram) Desc() *Desc { + return h.desc +} + +func (h *constNativeHistogram) Write(out *dto.Metric) error { + out.Histogram = &h.Histogram + out.Label = h.labelPairs + return nil +} + +func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) { + if len(buckets) == 0 { + return nil, nil + } + var ii []int + for k := range buckets { + ii = append(ii, k) + } + sort.Ints(ii) + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + count := buckets[i] + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one or two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. 
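// To make the gap rule above concrete, a worked example with made-up counts:
// buckets = map[int]int64{0: 3, 1: 2, 5: 7} sorts to indexes 0, 1, 5.
// Index 0 opens the first span; index 1 is adjacent (iDelta 0); the gap of
// three empty buckets before index 5 exceeds two, so a second span opens:
//   spans:  [{Offset: 0, Length: 2}, {Offset: 3, Length: 1}]
//   deltas: [3, -1, 5]   // 3; then 2-3; then 7-2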
+ spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index a595a203625..8b016355adb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -22,17 +22,18 @@ import ( "bytes" "fmt" "io" + "strconv" "strings" ) -func min(a, b int) int { +func minInt(a, b int) int { if a < b { return a } return b } -func max(a, b int) int { +func maxInt(a, b int) int { if a > b { return a } @@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { if codes[0].Tag == 'e' { c := codes[0] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2} } if codes[len(codes)-1].Tag == 'e' { c := codes[len(codes)-1] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)} } nn := n + n groups := [][]OpCode{} @@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { // there is a large range with no changes. if c.Tag == 'e' && i2-i1 > nn { group = append(group, OpCode{ - c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n), + c.Tag, i1, minInt(i2, i1+n), + j1, minInt(j2, j1+n), }) groups = append(groups, group) group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) + i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n) } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } @@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 { // is faster to compute than either .Ratio() or .QuickRatio(). func (m *SequenceMatcher) RealQuickRatio() float64 { la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) + return calculateRatio(minInt(la, lb), la+lb) } // Convert range to the "ed" format @@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string { beginning := start + 1 // lines start numbering with one length := stop - start if length == 1 { - return fmt.Sprintf("%d", beginning) + return strconv.Itoa(beginning) } if length == 0 { beginning-- // empty ranges begin at line just before the range diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index 97d17d6cb60..f7f97ef9262 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) name += "_total" } - valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) + // Our current conversion moves to legacy naming, so use legacy validation. 
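// For instance (illustrative values): IsValidLegacyMetricName("go_gc_duration_seconds")
// is true, while IsValidLegacyMetricName("go.gc.duration") is false because '.'
// falls outside [a-zA-Z0-9_:]; the UTF-8 scheme would accept both.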
+ valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 9d9b81ab448..592eec3e24f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string { if name == "" { return "" } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") + + sb := strings.Builder{} + sb.Grow(len(namespace) + len(subsystem) + len(name) + 2) + + if namespace != "" { + sb.WriteString(namespace) + sb.WriteString("_") } - return name + + if subsystem != "" { + sb.WriteString(subsystem) + sb.WriteString("_") + } + + sb.WriteString(name) + + return sb.String() } type invalidMetric struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 62a4e7ad9a0..e7bce8b58ec 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -23,6 +23,7 @@ import ( type processCollector struct { collectFn func(chan<- Metric) + describeFn func(chan<- *Desc) pidFn func() (int, error) reportErrors bool cpuTotal *Desc @@ -122,26 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { // Set up process metric collection if supported by the runtime. if canCollectProcess() { c.collectFn = c.processCollect + c.describeFn = c.describe } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } + c.collectFn = c.errorCollectFn + c.describeFn = c.errorDescribeFn } return c } -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime - ch <- c.inBytes - ch <- c.outBytes +func (c *processCollector) errorCollectFn(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) +} + +func (c *processCollector) errorDescribeFn(ch chan<- *Desc) { + if c.reportErrors { + ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform")) + } } // Collect returns the current state of all metrics of the collector. @@ -149,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } +// Describe returns all descriptions of the collector. 
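Stepping back to the BuildFQName change above: the strings.Builder rewrite preserves the old strings.Join output exactly, since empty components are skipped and an empty name still short-circuits. For example:

	prometheus.BuildFQName("ns", "sub", "requests_total") // "ns_sub_requests_total"
	prometheus.BuildFQName("ns", "", "requests_total")    // "ns_requests_total"
	prometheus.BuildFQName("", "", "requests_total")      // "requests_total"
	prometheus.BuildFQName("ns", "sub", "")               // ""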
+func (c *processCollector) Describe(ch chan<- *Desc) { + c.describeFn(ch) +} + func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c new file mode 100644 index 00000000000..1554f674d85 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c @@ -0,0 +1,84 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && cgo + +#include +#include +#include + +// The compiler warns that mach/shared_memory_server.h is deprecated, and to use +// mach/shared_region.h instead. But that doesn't define +// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and +// avoid a warning message when running tests. +#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U +#define SHARED_DATA_REGION_SIZE 0x10000000 +#define SHARED_TEXT_REGION_SIZE 0x10000000 + + +int get_memory_info(unsigned long long *rss, unsigned long long *vsize) +{ + // This is lightly adapted from how ps(1) obtains its memory info. + // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109 + + kern_return_t error; + task_t task = MACH_PORT_NULL; + mach_task_basic_info_data_t info; + mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT; + + error = task_info( + mach_task_self(), + MACH_TASK_BASIC_INFO, + (task_info_t) &info, + &info_count ); + + if( error != KERN_SUCCESS ) + { + return error; + } + + *rss = info.resident_size; + *vsize = info.virtual_size; + + { + vm_region_basic_info_data_64_t b_info; + mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT; + mach_vm_size_t size; + mach_port_t object_name; + + /* + * try to determine if this task has the split libraries + * mapped in... 
if so, adjust its virtual size down by + * the 2 segments that are used for split libraries + */ + info_count = VM_REGION_BASIC_INFO_COUNT_64; + + error = mach_vm_region( + mach_task_self(), + &address, + &size, + VM_REGION_BASIC_INFO_64, + (vm_region_info_t) &b_info, + &info_count, + &object_name); + + if (error == KERN_SUCCESS) { + if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) && + *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) { + *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE); + } + } + } + + return 0; +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go new file mode 100644 index 00000000000..b375c3a7719 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go @@ -0,0 +1,51 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && cgo + +package prometheus + +/* +int get_memory_info(unsigned long long *rss, unsigned long long *vs); +*/ +import "C" +import "fmt" + +func getMemory() (*memoryInfo, error) { + var rss, vsize C.ulonglong + + if err := C.get_memory_info(&rss, &vsize); err != 0 { + return nil, fmt.Errorf("task_info() failed with 0x%x", int(err)) + } + + return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + ch <- c.rss + ch <- c.vsize + + /* the process could be collected but not implemented yet + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go new file mode 100644 index 00000000000..50eb860a682 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -0,0 +1,128 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "errors" + "fmt" + "os" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// isn't available. +var notImplementedErr = errors.New("not implemented") + +type memoryInfo struct { + vsize uint64 // Virtual memory size in bytes + rss uint64 // Resident memory size in bytes +} + +func canCollectProcess() bool { + return true +} + +func getSoftLimit(which int) (uint64, error) { + rlimit := syscall.Rlimit{} + + if err := syscall.Getrlimit(which, &rlimit); err != nil { + return 0, err + } + + return rlimit.Cur, nil +} + +func getOpenFileCount() (float64, error) { + // Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to + // return a list of open fds, but that requires a way to call C APIs. The + // benefits, however, include fewer system calls and not failing when at the + // open file soft limit. + + if dir, err := os.Open("/dev/fd"); err != nil { + return 0.0, err + } else { + defer dir.Close() + + // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is + // that info not used, but KQUEUE descriptors fail stat(2), which causes + // the whole method to fail. + if names, err := dir.Readdirnames(0); err != nil { + return 0.0, err + } else { + // Subtract 1 to ignore the open /dev/fd descriptor above. + return float64(len(names) - 1), nil + } + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil { + if len(procs) == 1 { + startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9) + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs)) + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, c.startTime, err) + } + + // The proc structure returned by kern.proc.pid above has an Rusage member, + // but it is not filled in, so it needs to be fetched by getrusage(2). For + // that call, the UTime, STime, and Maxrss members are filled out, but not + // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require + // access to the C API to call task_info(TASK_BASIC_INFO). + rusage := unix.Rusage{} + + if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil { + cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds() + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime) + } else { + c.reportError(ch, c.cpuTotal, err) + } + + if memInfo, err := getMemory(); err == nil { + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) + } else if !errors.Is(err, notImplementedErr) { + // Don't report an error when support is not compiled in. 
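// (The no-cgo variant of getMemory further below returns the notImplementedErr
// sentinel, so this errors.Is check silently skips the memory metrics, rather
// than reporting an error, whenever the cgo implementation is compiled out.)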
+ c.reportError(ch, c.rss, err) + c.reportError(ch, c.vsize, err) + } + + if fds, err := getOpenFileCount(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds) + } else { + c.reportError(ch, c.openFDs, err) + } + + if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles)) + } else { + c.reportError(ch, c.maxFDs, err) + } + + if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil { + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace)) + } else { + c.reportError(ch, c.maxVsize, err) + } + + // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might + // be able to get the per-process network send/receive counts. +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go new file mode 100644 index 00000000000..5165047311e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !cgo + +package prometheus + +func getMemory() (*memoryInfo, error) { + return nil, notImplementedErr +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + + /* the process could be collected but not implemented yet + ch <- c.rss + ch <- c.vsize + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 14d56d2d068..9f4b130befa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !js && !wasip1 -// +build !windows,!js,!wasip1 +//go:build !windows && !js && !wasip1 && !darwin +// +build !windows,!js,!wasip1,!darwin package prometheus @@ -78,3 +78,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { c.reportError(ch, nil, err) } } + +// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. 
Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go deleted file mode 100644 index d8d9a6d7a2f..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build wasip1 -// +build wasip1 - -package prometheus - -func canCollectProcess() bool { - return false -} - -func (*processCollector) processCollect(chan<- Metric) { - // noop on this platform - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go similarity index 57% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go index b1e363d6cf6..c68f7f85187 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build js -// +build js +//go:build wasip1 || js +// +build wasip1 js package prometheus @@ -21,6 +21,13 @@ func canCollectProcess() bool { } func (c *processCollector) processCollect(ch chan<- Metric) { - // noop on this platform - return + c.errorCollectFn(ch) +} + +// describe returns all descriptions of the collector for wasip1 and js. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
+func (c *processCollector) describe(ch chan<- *Desc) { + c.errorDescribeFn(ch) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go index f973398df2d..fa474289ef8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) { } func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } + h := windows.CurrentProcess() var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) if err != nil { c.reportError(ch, nil, err) return @@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. } +// describe returns all descriptions of the collector for windows. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + func fileTimeToSeconds(ft windows.Filetime) float64 { return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index e598e66e688..28eed26727a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -207,7 +207,13 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO if encodingHeader != string(Identity) { rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) + + var enc expfmt.Encoder + if opts.EnableOpenMetricsTextCreatedSamples { + enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines()) + } else { + enc = expfmt.NewEncoder(w, contentType) + } // handleError handles the error according to opts.ErrorHandling // and returns true if we have to abort after the handling. @@ -408,6 +414,21 @@ type HandlerOpts struct { // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic + // Created Timestamps for counters, histograms and summaries, which for the current + // version of OpenMetrics are defined as extra series with the same name and "_created" + // suffix. 
See also the OpenMetrics specification for more details
+	// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1
+	//
+	// Created timestamps are used to improve the accuracy of reset detection,
+	// but the way it's designed in OpenMetrics 1.0 also dramatically increases cardinality
+	// if the scraper does not handle those metrics correctly (converting to created timestamp
+	// instead of leaving those series as-is). New OpenMetrics versions might improve
+	// this situation.
+	//
+	// Prometheus introduced the feature flag 'created-timestamp-zero-ingestion'
+	// in version 2.50.0 to handle this situation.
+	EnableOpenMetricsTextCreatedSamples bool
 	// ProcessStartTime allows setting process start time value that will be exposed
 	// with "Process-Start-Time-Unix" response header along with the metrics
 	// payload. This allows callers to have efficient transformations to cumulative
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 1ab0e479655..76a9e12f4a4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 
 	s := &summary{
 		desc: desc,
+		now:  opts.now,
 
 		objectives:       opts.Objectives,
 		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
@@ -280,6 +281,8 @@ type summary struct {
 
 	desc *Desc
 
+	now func() time.Time
+
 	objectives       map[float64]float64
 	sortedObjectives []float64
 
@@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) {
 	s.bufMtx.Lock()
 	defer s.bufMtx.Unlock()
 
-	now := time.Now()
+	now := s.now()
 	if now.After(s.hotBufExpTime) {
 		s.asyncFlush(now)
 	}
@@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error {
 	s.bufMtx.Lock()
 	s.mtx.Lock()
 	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
-	s.swapBufs(time.Now())
+	s.swapBufs(s.now())
 	s.bufMtx.Unlock()
 
 	s.flushColdBuf()
@@ -468,13 +471,9 @@ func (s *noObjectivesSummary) Observe(v float64) {
 	n := atomic.AddUint64(&s.countAndHotIdx, 1)
 	hotCounts := s.counts[n>>63]
 
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
-		}
-	}
+	atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
+		return oldVal + v
+	})
 	// Increment count last as we take it as a signal that the observation
 	// is complete.
 	atomic.AddUint64(&hotCounts.count, 1)
@@ -516,14 +515,13 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
 	// Finally add all the cold counts to the new hot counts and reset the cold counts.
 	atomic.AddUint64(&hotCounts.count, count)
 	atomic.StoreUint64(&coldCounts.count, 0)
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			atomic.StoreUint64(&coldCounts.sumBits, 0)
-			break
-		}
-	}
+
+	// Use atomicUpdateFloat to update hotCounts.sumBits atomically.
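// (The helper loads the current bit pattern, applies the update func, and
// CASes the result back, sleeping with exponential backoff, 10ms doubling
// up to a 320ms cap per atomic_update.go, when another writer wins the race.)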
+ atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 { + return oldVal + sum.GetSampleSum() + }) + atomic.StoreUint64(&coldCounts.sumBits, 0) + return nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go index fdc1e623948..68645ed0a90 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -14,7 +14,7 @@ package validations import ( - "fmt" + "errors" "reflect" dto "github.com/prometheus/client_model/go" @@ -27,7 +27,7 @@ func LintDuplicateMetric(mf *dto.MetricFamily) []error { for i, m := range mf.Metric { for _, k := range mf.Metric[i+1:] { if reflect.DeepEqual(m.Label, k.Label) { - problems = append(problems, fmt.Errorf("metric not unique")) + problems = append(problems, errors.New("metric not unique")) break } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 6f1200180a7..1258508e4f9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -39,6 +39,7 @@ package testutil import ( "bytes" + "errors" "fmt" "io" "net/http" @@ -46,6 +47,7 @@ import ( "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" "github.com/prometheus/client_golang/prometheus" @@ -300,20 +302,20 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str // result. func compare(got, want []*dto.MetricFamily) error { var gotBuf, wantBuf bytes.Buffer - enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) + enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)) for _, mf := range got { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding gathered metrics failed: %w", err) } } - enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) + enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)) for _, mf := range want { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding expected metrics failed: %w", err) } } if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { - return fmt.Errorf(diffErr) + return errors.New(diffErr) } return nil } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 57ec252adff..63809083aca 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -52,7 +52,8 @@ var ( http2Enabled: true, // 5 minutes is typically above the maximum sane scrape interval. So we can // use keepalive for all configurations. - idleConnTimeout: 5 * time.Minute, + idleConnTimeout: 5 * time.Minute, + newTLSConfigFunc: NewTLSConfigWithContext, } ) @@ -452,8 +453,12 @@ func (a *BasicAuth) UnmarshalYAML(unmarshal func(interface{}) error) error { // by net.Dialer. 
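A hedged usage sketch for the new option defined below; the custom function merely wraps the default NewTLSConfigWithContext and is purely illustrative:

	rt, err := config.NewRoundTripperFromConfig(
		config.HTTPClientConfig{},
		"example",
		config.WithNewTLSConfigFunc(func(ctx context.Context, c *config.TLSConfig, opts ...config.TLSConfigOption) (*tls.Config, error) {
			// Decorate, validate, or memoize the TLS config here.
			return config.NewTLSConfigWithContext(ctx, c, opts...)
		}),
	)
	if err != nil {
		// handle err
	}
	_ = rt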
type DialContextFunc func(context.Context, string, string) (net.Conn, error)
 
+// NewTLSConfigFunc returns a tls.Config.
+type NewTLSConfigFunc func(context.Context, *TLSConfig, ...TLSConfigOption) (*tls.Config, error)
+
 type httpClientOptions struct {
 	dialContextFunc   DialContextFunc
+	newTLSConfigFunc  NewTLSConfigFunc
 	keepAlivesEnabled bool
 	http2Enabled      bool
 	idleConnTimeout   time.Duration
@@ -473,13 +478,23 @@ func (f httpClientOptionFunc) applyToHTTPClientOptions(options *httpClientOption
 	f(options)
 }
 
-// WithDialContextFunc allows you to override func gets used for the actual dialing. The default is `net.Dialer.DialContext`.
+// WithDialContextFunc allows you to override the func that gets used for dialing.
+// The default is `net.Dialer.DialContext`.
 func WithDialContextFunc(fn DialContextFunc) HTTPClientOption {
 	return httpClientOptionFunc(func(opts *httpClientOptions) {
 		opts.dialContextFunc = fn
 	})
 }
 
+// WithNewTLSConfigFunc allows you to override the func that creates the TLS config
+// from the prometheus http config.
+// The default is `NewTLSConfigWithContext`.
+func WithNewTLSConfigFunc(newTLSConfigFunc NewTLSConfigFunc) HTTPClientOption {
+	return httpClientOptionFunc(func(opts *httpClientOptions) {
+		opts.newTLSConfigFunc = newTLSConfigFunc
+	})
+}
+
 // WithKeepAlivesDisabled allows disabling HTTP keepalive.
 func WithKeepAlivesDisabled() HTTPClientOption {
 	return httpClientOptionFunc(func(opts *httpClientOptions) {
@@ -670,7 +685,7 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon
 		return rt, nil
 	}
 
-	tlsConfig, err := NewTLSConfig(&cfg.TLSConfig, WithSecretManager(opts.secretManager))
+	tlsConfig, err := opts.newTLSConfigFunc(ctx, &cfg.TLSConfig, WithSecretManager(opts.secretManager))
 	if err != nil {
 		return nil, err
 	}
@@ -679,6 +694,7 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon
 	if err != nil {
 		return nil, err
 	}
+
 	if tlsSettings.immutable() {
 		// No need for a RoundTripper that reloads the files automatically.
 		return newRT(tlsConfig)
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index f1c495dd606..a21ed4ec1f8 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -38,7 +38,7 @@ type EncoderOption func(*encoderOption)
 
 // WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
 // to include _created lines (See
-// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
+// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
 // Created timestamps can improve the accuracy of series reset detection, but
 // come with a bandwidth cost.
 //
@@ -102,7 +102,7 @@ func WithUnit() EncoderOption {
 //
 //   - According to the OM specs, the `# UNIT` line is optional, but if populated,
 //     the unit has to be present in the metric name as its suffix:
-//     (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
+//     (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
// However, in order to accommodate any potential scenario where such a change in the
// metric name is not desirable, the users are here given the choice of either explicitly
// opting in, in case they wish for the unit to be included in the output AND in the metric name
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 0daca836afa..5766107cf95 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -28,13 +28,13 @@ import (
 
 var (
 	// NameValidationScheme determines the method of name validation to be used by
-	// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
-	// in isolation from other components that don't support UTF-8 may result in
-	// bugs or other undefined behavior. This value is intended to be set by
-	// UTF-8-aware binaries as part of their startup. To avoid need for locking,
-	// this value should be set once, ideally in an init(), before multiple
-	// goroutines are started.
-	NameValidationScheme = LegacyValidation
+	// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
+	// mode in isolation from other components that don't support UTF-8 may result
+	// in bugs or other undefined behavior. This value can be set to
+	// LegacyValidation during startup if a binary is not UTF-8-aware. To
+	// avoid the need for locking, this value should be set once, ideally in an
+	// init(), before multiple goroutines are started.
+	NameValidationScheme = UTF8Validation
 
 	// NameEscapingScheme defines the default way that names will be escaped when
 	// presented to systems that do not support UTF-8 names. If the Content-Type
diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go
index 73282ac4295..465affe0823 100644
--- a/vendor/github.com/prometheus/prometheus/config/config.go
+++ b/vendor/github.com/prometheus/prometheus/config/config.go
@@ -523,7 +523,7 @@ var (
 	ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
 		PrometheusProto:      "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
 		PrometheusText0_0_4:  "text/plain;version=0.0.4",
-		PrometheusText1_0_0:  "text/plain;version=1.0.0;escaping=allow-utf-8",
+		PrometheusText1_0_0:  "text/plain;version=1.0.0",
 		OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
 		OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
 	}
diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
index 2bec6cfabb9..13734844260 100644
--- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
+++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
@@ -29,7 +29,8 @@ import (
 )
 
 var (
-	relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`)
+	// relabelTargetLegacy allows targeting labels with the legacy Prometheus character set, plus ${} variables for dynamic characters from the source metrics.
+	relabelTargetLegacy = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`)
 
 	DefaultRelabelConfig = Config{
 		Action:      Replace,
@@ -124,10 +125,22 @@ func (c *Config) Validate() error {
 	if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
 		return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
 	}
-	if c.Action == Replace && !strings.Contains(c.TargetLabel, "$") && !model.LabelName(c.TargetLabel).IsValid() {
+	if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !model.LabelName(c.TargetLabel).IsValid() {
 		return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
 	}
-	if c.Action == Replace && strings.Contains(c.TargetLabel, "$") && !relabelTarget.MatchString(c.TargetLabel) {
+
+	isValidLabelNameWithRegexVarFn := func(value string) bool {
+		// UTF-8 allows ${} characters, so standard validation allows $variables by default.
+		// TODO(bwplotka): Relabelling users cannot put $ and ${<...>} characters in metric names or values.
+		// Design escaping mechanism to allow that, once valid use case appears.
+		return model.LabelName(value).IsValid()
+	}
+	if model.NameValidationScheme == model.LegacyValidation {
+		isValidLabelNameWithRegexVarFn = func(value string) bool {
+			return relabelTargetLegacy.MatchString(value)
+		}
+	}
+	if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !isValidLabelNameWithRegexVarFn(c.TargetLabel) {
 		return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
 	}
 	if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() {
@@ -136,7 +149,7 @@ func (c *Config) Validate() error {
 	if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement {
 		return fmt.Errorf("'replacement' can not be set for %s action", c.Action)
 	}
-	if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) {
+	if c.Action == LabelMap && !isValidLabelNameWithRegexVarFn(c.Replacement) {
 		return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
 	}
 	if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() {
diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go
index bb36a21208e..3fc3fa04374 100644
--- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go
+++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go
@@ -233,6 +233,14 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
 			node: &r.Record,
 		})
 	}
+	// While a record name may be valid UTF-8, it is a common mistake to put a PromQL expression in the record name.
+	// Disallow "{}" chars.
+	if strings.Contains(r.Record.Value, "{") || strings.Contains(r.Record.Value, "}") {
+		nodes = append(nodes, WrappedError{
+			err:  fmt.Errorf("braces present in the recording rule name; should it be in expr?: %s", r.Record.Value),
+			node: &r.Record,
+		})
+	}
 	}
 
 	for k, v := range r.Labels {
@@ -314,7 +322,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) {
 }
 
 // Parse parses and validates a set of rules.
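Callers now choose strictness per call site; a hedged sketch of the new signature below, with made-up rule content:

	data := []byte(`groups:
  - name: example
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))
`)
	// false = strict: unknown YAML fields are errors; true restores lenient decoding.
	rgs, errs := rulefmt.Parse(data, false)
	if len(errs) > 0 {
		// surface validation errors
	}
	_ = rgs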
-func Parse(content []byte) (*RuleGroups, []error) { +func Parse(content []byte, ignoreUnknownFields bool) (*RuleGroups, []error) { var ( groups RuleGroups node ruleGroups @@ -322,7 +330,9 @@ func Parse(content []byte) (*RuleGroups, []error) { ) decoder := yaml.NewDecoder(bytes.NewReader(content)) - decoder.KnownFields(true) + if !ignoreUnknownFields { + decoder.KnownFields(true) + } err := decoder.Decode(&groups) // Ignore io.EOF which happens with empty input. if err != nil && !errors.Is(err, io.EOF) { @@ -341,12 +351,12 @@ func Parse(content []byte) (*RuleGroups, []error) { } // ParseFile reads and parses rules from a file. -func ParseFile(file string) (*RuleGroups, []error) { +func ParseFile(file string, ignoreUnknownFields bool) (*RuleGroups, []error) { b, err := os.ReadFile(file) if err != nil { return nil, []error{fmt.Errorf("%s: %w", file, err)} } - rgs, errs := Parse(b) + rgs, errs := Parse(b, ignoreUnknownFields) for i := range errs { errs[i] = fmt.Errorf("%s: %w", file, errs[i]) } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go index ff756965f49..83e381539f1 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go @@ -177,61 +177,63 @@ func (p *NHCBParser) CreatedTimestamp() *int64 { } func (p *NHCBParser) Next() (Entry, error) { - if p.state == stateEmitting { - p.state = stateStart - if p.entry == EntrySeries { - isNHCB := p.handleClassicHistogramSeries(p.lset) - if isNHCB && !p.keepClassicHistograms { - // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. - return p.Next() + for { + if p.state == stateEmitting { + p.state = stateStart + if p.entry == EntrySeries { + isNHCB := p.handleClassicHistogramSeries(p.lset) + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + continue + } } + return p.entry, p.err } - return p.entry, p.err - } - p.entry, p.err = p.parser.Next() - if p.err != nil { - if errors.Is(p.err, io.EOF) && p.processNHCB() { - return EntryHistogram, nil - } - return EntryInvalid, p.err - } - switch p.entry { - case EntrySeries: - p.bytes, p.ts, p.value = p.parser.Series() - p.metricString = p.parser.Metric(&p.lset) - // Check the label set to see if we can continue or need to emit the NHCB. - var isNHCB bool - if p.compareLabels() { - // Labels differ. Check if we can emit the NHCB. - if p.processNHCB() { + p.entry, p.err = p.parser.Next() + if p.err != nil { + if errors.Is(p.err, io.EOF) && p.processNHCB() { return EntryHistogram, nil } - isNHCB = p.handleClassicHistogramSeries(p.lset) - } else { - // Labels are the same. Check if after an exponential histogram. - if p.lastHistogramExponential { - isNHCB = false - } else { + return EntryInvalid, p.err + } + switch p.entry { + case EntrySeries: + p.bytes, p.ts, p.value = p.parser.Series() + p.metricString = p.parser.Metric(&p.lset) + // Check the label set to see if we can continue or need to emit the NHCB. + var isNHCB bool + if p.compareLabels() { + // Labels differ. Check if we can emit the NHCB. + if p.processNHCB() { + return EntryHistogram, nil + } isNHCB = p.handleClassicHistogramSeries(p.lset) + } else { + // Labels are the same. Check if after an exponential histogram. 
+ if p.lastHistogramExponential { + isNHCB = false + } else { + isNHCB = p.handleClassicHistogramSeries(p.lset) + } + } + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + continue } + return p.entry, p.err + case EntryHistogram: + p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() + p.metricString = p.parser.Metric(&p.lset) + p.storeExponentialLabels() + case EntryType: + p.bName, p.typ = p.parser.Type() } - if isNHCB && !p.keepClassicHistograms { - // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. - return p.Next() + if p.processNHCB() { + return EntryHistogram, nil } return p.entry, p.err - case EntryHistogram: - p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() - p.metricString = p.parser.Metric(&p.lset) - p.storeExponentialLabels() - case EntryType: - p.bName, p.typ = p.parser.Type() - } - if p.processNHCB() { - return EntryHistogram, nil } - return p.entry, p.err } // Return true if labels have changed and we should emit the NHCB. diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 956fd4652ac..fbc37c29ef7 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -56,7 +56,7 @@ const ( alertmanagerLabel = "alertmanager" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var userAgent = version.PrometheusUserAgent() // Alert is a generic representation of an alert in the Prometheus eco-system. type Alert struct { diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index bbd84102684..cf669282011 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -47,6 +47,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/zeropool" ) @@ -123,12 +124,13 @@ type QueryEngine interface { NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) } +var _ QueryLogger = (*logging.JSONFileLogger)(nil) + // QueryLogger is an interface that can be used to log all the queries logged // by the engine. type QueryLogger interface { - Log(context.Context, slog.Level, string, ...any) - With(args ...any) - Close() error + slog.Handler + io.Closer } // A Query is derived from an a raw query string and can be run against an engine @@ -628,6 +630,9 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota defer func() { ng.queryLoggerLock.RLock() if l := ng.queryLogger; l != nil { + logger := slog.New(l) + f := make([]slog.Attr, 0, 16) // Probably enough up front to not need to reallocate on append. + params := make(map[string]interface{}, 4) params["query"] = q.q if eq, ok := q.Statement().(*parser.EvalStmt); ok { @@ -636,20 +641,20 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota // The step provided by the user is in seconds. 
params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) } - f := []interface{}{"params", params} + f = append(f, slog.Any("params", params)) if err != nil { - f = append(f, "error", err) + f = append(f, slog.Any("error", err)) } - f = append(f, "stats", stats.NewQueryStats(q.Stats())) + f = append(f, slog.Any("stats", stats.NewQueryStats(q.Stats()))) if span := trace.SpanFromContext(ctx); span != nil { - f = append(f, "spanID", span.SpanContext().SpanID()) + f = append(f, slog.Any("spanID", span.SpanContext().SpanID())) } if origin := ctx.Value(QueryOrigin{}); origin != nil { for k, v := range origin.(map[string]interface{}) { - f = append(f, k, v) + f = append(f, slog.Any(k, v)) } } - l.Log(context.Background(), slog.LevelInfo, "promql query logged", f...) + logger.LogAttrs(context.Background(), slog.LevelInfo, "promql query logged", f...) // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? // ng.metrics.queryLogFailures.Inc() // ng.logger.Error("can't log query", "err", err) @@ -3721,14 +3726,15 @@ func detectHistogramStatsDecoding(expr parser.Expr) { if !ok { continue } - if call.Func.Name == "histogram_count" || call.Func.Name == "histogram_sum" { + switch call.Func.Name { + case "histogram_count", "histogram_sum", "histogram_avg": n.SkipHistogramBuckets = true - break - } - if call.Func.Name == "histogram_quantile" || call.Func.Name == "histogram_fraction" { + case "histogram_quantile", "histogram_fraction": n.SkipHistogramBuckets = false - break + default: + continue } + break } return errors.New("stop") }) diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index da1821fd18a..605661e5a0e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -355,7 +355,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // https://en.wikipedia.org/wiki/Exponential_smoothing . func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] - + metricName := samples.Metric.Get(labels.MetricName) // The smoothing factor argument. sf := vals[1].(Vector)[0].F @@ -374,6 +374,10 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions // Can't do the smoothing operation with less than two points. if l < 2 { + // Annotate mix of float and histogram. 
+ if l == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } @@ -394,7 +398,9 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions s0, s1 = s1, x+y } - + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: s1}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: s1}), nil } @@ -685,9 +691,15 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod // === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { values := make(vectorByValueHeap, 0, len(s.Floats)) for _, f := range s.Floats { @@ -699,18 +711,20 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode values = append(values, Sample{F: math.Abs(f.F - median)}) } return quantile(0.5, values) - }), nil + }), annos } // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. max_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { maxVal := s.Floats[0].F for _, f := range s.Floats { @@ -719,18 +733,20 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return maxVal - }), nil + }), annos } // === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. min_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. 
+ samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { minVal := s.Floats[0].F for _, f := range s.Floats { @@ -739,7 +755,7 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return minVal - }), nil + }), annos } // === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === @@ -788,10 +804,6 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] if len(el.Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. quantile_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. return enh.Out, nil } @@ -799,7 +811,10 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva if math.IsNaN(q) || q < 0 || q > 1 { annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) } - + if len(el.Histograms) > 0 { + metricName := el.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo(metricName, args[0].PositionRange())) + } values := make(vectorByValueHeap, 0, len(el.Floats)) for _, f := range el.Floats { values = append(values, Sample{F: f.F}) @@ -809,13 +824,15 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva // === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. stddev_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 @@ -827,18 +844,20 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) - }), nil + }), annos } // === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. stdvar_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. 
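stddev_over_time and stdvar_over_time above rely on a Kahan-compensated Welford update. Since kahanSumInc is unexported, here is the same arithmetic in a standalone form; the input values are chosen so that naive summation would lose precision.

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum while maintaining a running compensation term c.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

// stdvar computes the population variance with compensated summation,
// mirroring the loop body used by the *_over_time functions.
func stdvar(values []float64) float64 {
	var count, mean, cMean, aux, cAux float64
	for _, v := range values {
		count++
		delta := v - (mean + cMean)
		mean, cMean = kahanSumInc(delta/count, mean, cMean)
		aux, cAux = kahanSumInc(delta*(v-(mean+cMean)), aux, cAux)
	}
	return (aux + cAux) / count
}

func main() {
	vals := []float64{1e9 + 4, 1e9 + 7, 1e9 + 13, 1e9 + 16}
	fmt.Println("stdvar:", stdvar(vals))            // ~22.5
	fmt.Println("stddev:", math.Sqrt(stdvar(vals))) // ~4.743
}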
+ samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 @@ -850,7 +869,7 @@ func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count - }), nil + }), annos } // === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === @@ -1110,10 +1129,15 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f // === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] + metricName := samples.Metric.Get(labels.MetricName) - // No sense in trying to compute a derivative without at least two points. + // No sense in trying to compute a derivative without at least two float points. // Drop this Vector element. if len(samples.Floats) < 2 { + // Annotate mix of float and histogram. + if len(samples.Floats) == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } @@ -1121,6 +1145,9 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: slope}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: slope}), nil } @@ -1128,13 +1155,22 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] duration := vals[1].(Vector)[0].F - // No sense in trying to predict anything without at least two points. + metricName := samples.Metric.Get(labels.MetricName) + + // No sense in trying to predict anything without at least two float points. // Drop this Vector element. if len(samples.Floats) < 2 { + // Annotate mix of float and histogram. + if len(samples.Floats) == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } - slope, intercept := linearRegression(samples.Floats, enh.Ts) + slope, intercept := linearRegression(samples.Floats, enh.Ts) + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: slope*duration + intercept}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: slope*duration + intercept}), nil } @@ -1351,14 +1387,13 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). 
Labels() - mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F}) } - // Now deal with the histograms. + // Now deal with the native histograms. for _, sample := range histogramSamples { // We have to reconstruct the exact same signature as above for // a classic histogram, just ignoring any le label. @@ -1382,16 +1417,23 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev }) } + // Now do classic histograms that have already been filtered for conflicting native histograms. for _, mb := range enh.signatureToMetricWithBuckets { if len(mb.buckets) > 0 { res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets) - enh.Out = append(enh.Out, Sample{ - Metric: mb.metric, - F: res, - }) if forcedMonotonicity { annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange())) } + + if !enh.enableDelayedNameRemoval { + mb.metric = mb.metric.DropMetricName() + } + + enh.Out = append(enh.Out, Sample{ + Metric: mb.metric, + F: res, + DropName: true, + }) } } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index 3865dc6548d..cdb4532d3bd 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -363,17 +363,18 @@ grouping_label_list: grouping_label : maybe_label { if !model.LabelName($1.Val).IsValid() { - yylex.(*parser).unexpected("grouping opts", "label") + yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", $1.Val) } $$ = $1 } | STRING { - if !model.LabelName(yylex.(*parser).unquoteString($1.Val)).IsValid() { - yylex.(*parser).unexpected("grouping opts", "label") + unquoted := yylex.(*parser).unquoteString($1.Val) + if !model.LabelName(unquoted).IsValid() { + yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", unquoted) } $$ = $1 $$.Pos++ - $$.Val = yylex.(*parser).unquoteString($$.Val) + $$.Val = unquoted } | error { yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} } @@ -487,7 +488,7 @@ matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET if errMsg != ""{ errRange := mergeRanges(&$2, &$4) - yylex.(*parser).addParseErrf(errRange, errMsg) + yylex.(*parser).addParseErrf(errRange, "%s", errMsg) } numLit, _ := $3.(*NumberLiteral) diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index 7ff8591169b..78d5e15245e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -1259,19 +1259,20 @@ yydefault: yyDollar = yyS[yypt-1 : yypt+1] { if !model.LabelName(yyDollar[1].item.Val).IsValid() { - yylex.(*parser).unexpected("grouping opts", "label") + yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", yyDollar[1].item.Val) } yyVAL.item = yyDollar[1].item } case 59: yyDollar = yyS[yypt-1 : yypt+1] { - if !model.LabelName(yylex.(*parser).unquoteString(yyDollar[1].item.Val)).IsValid() { - yylex.(*parser).unexpected("grouping opts", "label") + unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val) + if 
!model.LabelName(unquoted).IsValid() { + yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", unquoted) } yyVAL.item = yyDollar[1].item yyVAL.item.Pos++ - yyVAL.item.Val = yylex.(*parser).unquoteString(yyVAL.item.Val) + yyVAL.item.Val = unquoted } case 60: yyDollar = yyS[yypt-1 : yypt+1] @@ -1384,7 +1385,7 @@ yydefault: if errMsg != "" { errRange := mergeRanges(&yyDollar[2].item, &yyDollar[4].item) - yylex.(*parser).addParseErrf(errRange, errMsg) + yylex.(*parser).addParseErrf(errRange, "%s", errMsg) } numLit, _ := yyDollar[3].node.(*NumberLiteral) diff --git a/vendor/github.com/prometheus/prometheus/promql/quantile.go b/vendor/github.com/prometheus/prometheus/promql/quantile.go index a6851810c5a..f3af82487c7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/quantile.go +++ b/vendor/github.com/prometheus/prometheus/promql/quantile.go @@ -47,7 +47,6 @@ const smallDeltaTolerance = 1e-12 // excludedLabels are the labels to exclude from signature calculation for // quantiles. var excludedLabels = []string{ - labels.MetricName, labels.BucketLabel, } diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index e7f15baefeb..77d53395e09 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -143,8 +143,9 @@ type AlertingRule struct { logger *slog.Logger - noDependentRules *atomic.Bool - noDependencyRules *atomic.Bool + dependenciesMutex sync.RWMutex + dependentRules []Rule + dependencyRules []Rule } // NewAlertingRule constructs a new AlertingRule. @@ -171,8 +172,6 @@ func NewAlertingRule( evaluationTimestamp: atomic.NewTime(time.Time{}), evaluationDuration: atomic.NewDuration(0), lastError: atomic.NewError(nil), - noDependentRules: atomic.NewBool(false), - noDependencyRules: atomic.NewBool(false), } } @@ -316,20 +315,54 @@ func (r *AlertingRule) Restored() bool { return r.restored.Load() } -func (r *AlertingRule) SetNoDependentRules(noDependentRules bool) { - r.noDependentRules.Store(noDependentRules) +func (r *AlertingRule) SetDependentRules(dependents []Rule) { + r.dependenciesMutex.Lock() + defer r.dependenciesMutex.Unlock() + + r.dependentRules = make([]Rule, len(dependents)) + copy(r.dependentRules, dependents) } func (r *AlertingRule) NoDependentRules() bool { - return r.noDependentRules.Load() + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + + if r.dependentRules == nil { + return false // We don't know if there are dependent rules. + } + + return len(r.dependentRules) == 0 +} + +func (r *AlertingRule) DependentRules() []Rule { + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + return r.dependentRules } -func (r *AlertingRule) SetNoDependencyRules(noDependencyRules bool) { - r.noDependencyRules.Store(noDependencyRules) +func (r *AlertingRule) SetDependencyRules(dependencies []Rule) { + r.dependenciesMutex.Lock() + defer r.dependenciesMutex.Unlock() + + r.dependencyRules = make([]Rule, len(dependencies)) + copy(r.dependencyRules, dependencies) } func (r *AlertingRule) NoDependencyRules() bool { - return r.noDependencyRules.Load() + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + + if r.dependencyRules == nil { + return false // We don't know if there are dependency rules. 
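Note the tri-state encoded by the nil check above: SetDependentRules/SetDependencyRules always copy into a non-nil slice, so a nil slice can keep meaning "not analysed yet" while an empty one means "analysed, none found". The same idea in isolation, with a toy type rather than the real Rule:

package main

import (
	"fmt"
	"sync"
)

type depTracker struct {
	mu   sync.RWMutex
	deps []string // nil means dependencies have not been analysed yet.
}

func (t *depTracker) SetDeps(deps []string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	// Copy so callers cannot mutate the stored slice afterwards; this also
	// turns a nil input into a non-nil empty slice.
	t.deps = make([]string, len(deps))
	copy(t.deps, deps)
}

// NoDeps reports whether it is *guaranteed* there are no dependencies.
func (t *depTracker) NoDeps() bool {
	t.mu.RLock()
	defer t.mu.RUnlock()
	if t.deps == nil {
		return false // Unknown: we must not claim independence.
	}
	return len(t.deps) == 0
}

func main() {
	var t depTracker
	fmt.Println(t.NoDeps()) // false: not analysed yet
	t.SetDeps(nil)
	fmt.Println(t.NoDeps()) // true: analysed, none found
}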
+ } + + return len(r.dependencyRules) == 0 +} + +func (r *AlertingRule) DependencyRules() []Rule { + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + return r.dependencyRules } // resolvedRetention is the duration for which a resolved alert instance diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go index ecc96d0a123..9ad9aab0937 100644 --- a/vendor/github.com/prometheus/prometheus/rules/group.go +++ b/vendor/github.com/prometheus/prometheus/rules/group.go @@ -74,9 +74,7 @@ type Group struct { // defaults to DefaultEvalIterationFunc. evalIterationFunc GroupEvalIterationFunc - // concurrencyController controls the rules evaluation concurrency. - concurrencyController RuleConcurrencyController - appOpts *storage.AppendOptions + appOpts *storage.AppendOptions } // GroupEvalIterationFunc is used to implement and extend rule group @@ -126,33 +124,27 @@ func NewGroup(o GroupOptions) *Group { evalIterationFunc = DefaultEvalIterationFunc } - concurrencyController := opts.RuleConcurrencyController - if concurrencyController == nil { - concurrencyController = sequentialRuleEvalController{} - } - if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } return &Group{ - name: o.Name, - file: o.File, - interval: o.Interval, - queryOffset: o.QueryOffset, - limit: o.Limit, - rules: o.Rules, - shouldRestore: o.ShouldRestore, - opts: opts, - seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), - done: make(chan struct{}), - managerDone: o.done, - terminated: make(chan struct{}), - logger: opts.Logger.With("file", o.File, "group", o.Name), - metrics: metrics, - evalIterationFunc: evalIterationFunc, - concurrencyController: concurrencyController, - appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, + name: o.Name, + file: o.File, + interval: o.Interval, + queryOffset: o.QueryOffset, + limit: o.Limit, + rules: o.Rules, + shouldRestore: o.ShouldRestore, + opts: opts, + seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), + done: make(chan struct{}), + managerDone: o.done, + terminated: make(chan struct{}), + logger: opts.Logger.With("file", o.File, "group", o.Name), + metrics: metrics, + evalIterationFunc: evalIterationFunc, + appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, } } @@ -310,11 +302,19 @@ func (g *Group) run(ctx context.Context) { } } -func (g *Group) stop() { +func (g *Group) stopAsync() { close(g.done) +} + +func (g *Group) waitStopped() { <-g.terminated } +func (g *Group) stop() { + g.stopAsync() + g.waitStopped() +} + func (g *Group) hash() uint64 { l := labels.New( labels.Label{Name: "name", Value: g.name}, @@ -647,25 +647,51 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { } var wg sync.WaitGroup - for i, rule := range g.rules { - select { - case <-g.done: - return - default: - } - - if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) { - wg.Add(1) + ctrl := g.opts.RuleConcurrencyController + if ctrl == nil { + ctrl = sequentialRuleEvalController{} + } - go eval(i, rule, func() { - wg.Done() - ctrl.Done(ctx) - }) - } else { + batches := ctrl.SplitGroupIntoBatches(ctx, g) + if len(batches) == 0 { + // Sequential evaluation when batches aren't set. + // This is the behaviour without a defined RuleConcurrencyController + for i, rule := range g.rules { + // Check if the group has been stopped. + select { + case <-g.done: + return + default: + } eval(i, rule, nil) } + } else { + // Concurrent evaluation. 
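The concurrent path that follows reduces to: within a batch, a rule runs on its own goroutine only when a concurrency slot is free, and the group never starts batch N+1 until batch N has drained. A runnable reduction of that shape; a channel semaphore stands in for the weighted semaphore used by the real controller.

package main

import (
	"fmt"
	"sync"
)

func evalBatches(batches [][]int, allow func() bool, done func(), eval func(i int)) {
	for _, batch := range batches {
		var wg sync.WaitGroup
		for _, i := range batch {
			if len(batch) > 1 && allow() {
				wg.Add(1)
				go func(i int) {
					defer wg.Done()
					defer done()
					eval(i)
				}(i)
			} else {
				eval(i) // Fall back to inline, sequential evaluation.
			}
		}
		// Finish the whole batch before moving on to the next one.
		wg.Wait()
	}
}

func main() {
	sem := make(chan struct{}, 2) // At most two concurrent evaluations.
	allow := func() bool {
		select {
		case sem <- struct{}{}:
			return true
		default:
			return false
		}
	}
	done := func() { <-sem }

	evalBatches([][]int{{0, 1, 2}, {3}, {4, 5}}, allow, done, func(i int) {
		fmt.Println("evaluated rule", i)
	})
}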
+ for _, batch := range batches { + for _, ruleIndex := range batch { + // Check if the group has been stopped. + select { + case <-g.done: + wg.Wait() + return + default: + } + rule := g.rules[ruleIndex] + if len(batch) > 1 && ctrl.Allow(ctx, g, rule) { + wg.Add(1) + + go eval(ruleIndex, rule, func() { + wg.Done() + ctrl.Done(ctx) + }) + } else { + eval(ruleIndex, rule, nil) + } + } + // It is important that we finish processing all rules in the current batch before we move on to the next one. + wg.Wait() + } } - wg.Wait() g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load()) g.cleanupStaleSeries(ctx, ts) @@ -1034,27 +1060,25 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { // output metric produced by another rule in its expression (i.e. as its "input"). type dependencyMap map[Rule][]Rule -// dependents returns the count of rules which use the output of the given rule as one of their inputs. -func (m dependencyMap) dependents(r Rule) int { - return len(m[r]) +// dependents returns the rules which use the output of the given rule as one of their inputs. +func (m dependencyMap) dependents(r Rule) []Rule { + return m[r] } -// dependencies returns the count of rules on which the given rule is dependent for input. -func (m dependencyMap) dependencies(r Rule) int { +// dependencies returns the rules on which the given rule is dependent for input. +func (m dependencyMap) dependencies(r Rule) []Rule { if len(m) == 0 { - return 0 + return []Rule{} } - var count int - for _, children := range m { - for _, child := range children { - if child == r { - count++ - } + var dependencies []Rule + for rule, dependents := range m { + if slices.Contains(dependents, r) { + dependencies = append(dependencies, rule) } } - return count + return dependencies } // isIndependent determines whether the given rule is not dependent on another rule for its input, nor is any other rule @@ -1064,7 +1088,7 @@ func (m dependencyMap) isIndependent(r Rule) bool { return false } - return m.dependents(r)+m.dependencies(r) == 0 + return len(m.dependents(r)) == 0 && len(m.dependencies(r)) == 0 } // buildDependencyMap builds a data-structure which contains the relationships between rules within a group. diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index edc67a832b0..b1d3e8e3d6f 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -90,12 +90,13 @@ func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time. // The Manager manages recording and alerting rules. type Manager struct { - opts *ManagerOptions - groups map[string]*Group - mtx sync.RWMutex - block chan struct{} - done chan struct{} - restored bool + opts *ManagerOptions + groups map[string]*Group + mtx sync.RWMutex + block chan struct{} + done chan struct{} + restored bool + restoreNewRuleGroups bool logger *slog.Logger } @@ -122,6 +123,10 @@ type ManagerOptions struct { ConcurrentEvalsEnabled bool RuleConcurrencyController RuleConcurrencyController RuleDependencyController RuleDependencyController + // At present, the manager only restores `for` state when the manager is newly created, which happens + // during restarts.
This flag provides an option to restore the `for` state when new rule groups are + // added to an existing manager + RestoreNewRuleGroups bool Metrics *Metrics } @@ -154,11 +159,12 @@ func NewManager(o *ManagerOptions) *Manager { } m := &Manager{ - groups: map[string]*Group{}, - opts: o, - block: make(chan struct{}), - done: make(chan struct{}), - logger: o.Logger, + groups: map[string]*Group{}, + opts: o, + block: make(chan struct{}), + done: make(chan struct{}), + logger: o.Logger, + restoreNewRuleGroups: o.RestoreNewRuleGroups, } return m @@ -182,8 +188,14 @@ func (m *Manager) Stop() { m.logger.Info("Stopping rule manager...") + // Stop all groups asynchronously, then wait for them to finish. + // This is faster than stopping and waiting for each group in sequence. for _, eg := range m.groups { - eg.stop() + eg.stopAsync() + } + + for _, eg := range m.groups { + eg.waitStopped() } // Shut down the groups waiting multiple evaluation intervals to write @@ -207,7 +219,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels default: } - groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...) + groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, false, files...) if errs != nil { for _, e := range errs { @@ -276,7 +288,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels // GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them. type GroupLoader interface { - Load(identifier string) (*rulefmt.RuleGroups, []error) + Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error) Parse(query string) (parser.Expr, error) } @@ -284,22 +296,22 @@ type GroupLoader interface { // and parser.ParseExpr. type FileLoader struct{} -func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) { - return rulefmt.ParseFile(identifier) +func (FileLoader) Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error) { + return rulefmt.ParseFile(identifier, ignoreUnknownFields) } func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) } // LoadGroups reads groups from a list of files. func (m *Manager) LoadGroups( - interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, filenames ...string, + interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, ignoreUnknownFields bool, filenames ...string, ) (map[string]*Group, []error) { groups := make(map[string]*Group) - shouldRestore := !m.restored + shouldRestore := !m.restored || m.restoreNewRuleGroups for _, fn := range filenames { - rgs, errs := m.opts.GroupLoader.Load(fn) + rgs, errs := m.opts.GroupLoader.Load(fn, ignoreUnknownFields) if errs != nil { return nil, errs } @@ -329,7 +341,7 @@ func (m *Manager) LoadGroups( labels.FromMap(r.Annotations), externalLabels, externalURL, - m.restored, + !shouldRestore, m.logger.With("alert", r.Alert), )) continue @@ -444,8 +456,8 @@ func SendAlerts(s Sender, externalURL string) NotifyFunc { // RuleDependencyController controls whether a set of rules have dependencies between each other. type RuleDependencyController interface { // AnalyseRules analyses dependencies between the input rules. 
For each rule where it's guaranteed - not having any dependants and/or dependency, this function should call Rule.SetNoDependentRules(true) - and/or Rule.SetNoDependencyRules(true). + to have no dependants and/or dependencies, this function should call Rule.SetDependentRules(...) + and/or Rule.SetDependencyRules(...). AnalyseRules(rules []Rule) } @@ -460,15 +472,22 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) { } for _, r := range rules { - r.SetNoDependentRules(depMap.dependents(r) == 0) - r.SetNoDependencyRules(depMap.dependencies(r) == 0) + r.SetDependentRules(depMap.dependents(r)) + r.SetDependencyRules(depMap.dependencies(r)) } } +// ConcurrentRules represents a slice of indexes of rules that can be evaluated concurrently. +type ConcurrentRules []int + // RuleConcurrencyController controls concurrency for rules that are safe to be evaluated concurrently. // Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus // server with additional query load. Concurrency is controlled globally, not on a per-group basis. type RuleConcurrencyController interface { + // SplitGroupIntoBatches returns an ordered slice of ConcurrentRules, which are batches of rules that can be evaluated concurrently. + // The rules are represented by their index from the input rule group. + SplitGroupIntoBatches(ctx context.Context, group *Group) []ConcurrentRules + // Allow determines if the given rule is allowed to be evaluated concurrently. // If Allow() returns true, then Done() must be called to release the acquired slot and corresponding cleanup is done. // It is important that both *Group and Rule are not retained and only be used for the duration of the call. @@ -490,21 +509,51 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle } func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool { - // To allow a rule to be executed concurrently, we need 3 conditions: - // 1. The rule must not have any rules that depend on it. - // 2. The rule itself must not depend on any other rules. - // 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot. - if rule.NoDependentRules() && rule.NoDependencyRules() { - return c.sema.TryAcquire(1) + return c.sema.TryAcquire(1) +} + +func (c *concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { + // Using the rule dependency controller information (rules being identified as having no dependencies or no dependants), + // we can safely run the following concurrent groups: + // 1. Concurrently, all rules that have no dependencies + // 2. Sequentially, all rules that have both dependencies and dependants + // 3.
Concurrently, all rules that have no dependants + + var noDependencies []int + var dependenciesAndDependants []int + var noDependants []int + + for i, r := range g.rules { + switch { + case r.NoDependencyRules(): + noDependencies = append(noDependencies, i) + case !r.NoDependentRules() && !r.NoDependencyRules(): + dependenciesAndDependants = append(dependenciesAndDependants, i) + case r.NoDependentRules(): + noDependants = append(noDependants, i) + } } - return false + var order []ConcurrentRules + if len(noDependencies) > 0 { + order = append(order, noDependencies) + } + for _, r := range dependenciesAndDependants { + order = append(order, []int{r}) + } + if len(noDependants) > 0 { + order = append(order, noDependants) + } + + return order } func (c *concurrentRuleEvalController) Done(_ context.Context) { c.sema.Release(1) } +var _ RuleConcurrencyController = &sequentialRuleEvalController{} + // sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially. type sequentialRuleEvalController struct{} @@ -512,6 +561,10 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) return false } +func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { + return nil +} + func (c sequentialRuleEvalController) Done(_ context.Context) {} // FromMaps returns new sorted Labels from the given maps, overriding each other in order. diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go index 52c2a875ab5..3b6db210af1 100644 --- a/vendor/github.com/prometheus/prometheus/rules/recording.go +++ b/vendor/github.com/prometheus/prometheus/rules/recording.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net/url" + "sync" "time" "go.uber.org/atomic" @@ -43,8 +44,9 @@ type RecordingRule struct { // Duration of how long it took to evaluate the recording rule. evaluationDuration *atomic.Duration - noDependentRules *atomic.Bool - noDependencyRules *atomic.Bool + dependenciesMutex sync.RWMutex + dependentRules []Rule + dependencyRules []Rule } // NewRecordingRule returns a new recording rule. @@ -57,8 +59,6 @@ func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *Reco evaluationTimestamp: atomic.NewTime(time.Time{}), evaluationDuration: atomic.NewDuration(0), lastError: atomic.NewError(nil), - noDependentRules: atomic.NewBool(false), - noDependencyRules: atomic.NewBool(false), } } @@ -172,18 +172,52 @@ func (rule *RecordingRule) GetEvaluationTimestamp() time.Time { return rule.evaluationTimestamp.Load() } -func (rule *RecordingRule) SetNoDependentRules(noDependentRules bool) { - rule.noDependentRules.Store(noDependentRules) +func (rule *RecordingRule) SetDependentRules(dependents []Rule) { + rule.dependenciesMutex.Lock() + defer rule.dependenciesMutex.Unlock() + + rule.dependentRules = make([]Rule, len(dependents)) + copy(rule.dependentRules, dependents) } func (rule *RecordingRule) NoDependentRules() bool { - return rule.noDependentRules.Load() + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + + if rule.dependentRules == nil { + return false // We don't know if there are dependent rules. 
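The batching policy implemented by SplitGroupIntoBatches above, extracted into a toy form to show the ordering it produces. The rule struct here is an illustrative stand-in for the real Rule interface.

package main

import "fmt"

type rule struct{ noDeps, noDependants bool }

func splitIntoBatches(rules []rule) [][]int {
	var noDependencies, linked, noDependants []int
	for i, r := range rules {
		switch {
		case r.noDeps:
			noDependencies = append(noDependencies, i)
		case !r.noDependants && !r.noDeps:
			linked = append(linked, i)
		case r.noDependants:
			noDependants = append(noDependants, i)
		}
	}
	var order [][]int
	if len(noDependencies) > 0 {
		order = append(order, noDependencies)
	}
	for _, i := range linked {
		order = append(order, []int{i}) // A one-element batch runs sequentially.
	}
	if len(noDependants) > 0 {
		order = append(order, noDependants)
	}
	return order
}

func main() {
	rules := []rule{{noDeps: true}, {noDependants: true}, {}, {noDeps: true}}
	fmt.Println(splitIntoBatches(rules)) // [[0 3] [2] [1]]
}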
+ } + + return len(rule.dependentRules) == 0 +} + +func (rule *RecordingRule) DependentRules() []Rule { + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + return rule.dependentRules } -func (rule *RecordingRule) SetNoDependencyRules(noDependencyRules bool) { - rule.noDependencyRules.Store(noDependencyRules) +func (rule *RecordingRule) SetDependencyRules(dependencies []Rule) { + rule.dependenciesMutex.Lock() + defer rule.dependenciesMutex.Unlock() + + rule.dependencyRules = make([]Rule, len(dependencies)) + copy(rule.dependencyRules, dependencies) } func (rule *RecordingRule) NoDependencyRules() bool { - return rule.noDependencyRules.Load() + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + + if rule.dependencyRules == nil { + return false // We don't know if there are dependency rules. + } + + return len(rule.dependencyRules) == 0 +} + +func (rule *RecordingRule) DependencyRules() []Rule { + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + return rule.dependencyRules } diff --git a/vendor/github.com/prometheus/prometheus/rules/rule.go b/vendor/github.com/prometheus/prometheus/rules/rule.go index 687c03d000d..33f1755ac52 100644 --- a/vendor/github.com/prometheus/prometheus/rules/rule.go +++ b/vendor/github.com/prometheus/prometheus/rules/rule.go @@ -62,19 +62,25 @@ type Rule interface { // NOTE: Used dynamically by rules.html template. GetEvaluationTimestamp() time.Time - // SetNoDependentRules sets whether there's no other rule in the rule group that depends on this rule. - SetNoDependentRules(bool) + // SetDependentRules sets rules which depend on the output of this rule. + SetDependentRules(rules []Rule) // NoDependentRules returns true if it's guaranteed that in the rule group there's no other rule // which depends on this one. In case this function returns false there's no such guarantee, which // means there may or may not be other rules depending on this one. NoDependentRules() bool - // SetNoDependencyRules sets whether this rule doesn't depend on the output of any rule in the rule group. - SetNoDependencyRules(bool) + // DependentRules returns the rules which depend on the output of this rule. + DependentRules() []Rule + + // SetDependencyRules sets rules on which this rule depends. + SetDependencyRules(rules []Rule) // NoDependencyRules returns true if it's guaranteed that this rule doesn't depend on the output of // any other rule in the group. In case this function returns false there's no such guarantee, which // means the rule may or may not depend on other rules. NoDependencyRules() bool + + // DependencyRules returns the rules on which this rule depends. 
+ DependencyRules() []Rule } diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 04da3162e62..5ef5dccb99d 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -107,7 +107,7 @@ type Manager struct { scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error) - scrapeFailureLoggers map[string]*logging.JSONFileLogger + scrapeFailureLoggers map[string]FailureLogger targetSets map[string][]*targetgroup.Group buffers *pool.Pool @@ -249,7 +249,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } c := make(map[string]*config.ScrapeConfig) - scrapeFailureLoggers := map[string]*logging.JSONFileLogger{ + scrapeFailureLoggers := map[string]FailureLogger{ "": nil, // Emptying the file name sets the scrape logger to nil. } for _, scfg := range scfgs { @@ -257,7 +257,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if _, ok := scrapeFailureLoggers[scfg.ScrapeFailureLogFile]; !ok { // We promise to reopen the file on each reload. var ( - logger *logging.JSONFileLogger + logger FailureLogger err error ) if m.newScrapeFailureLogger != nil { diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 4803354cf6f..2e95ee2282f 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -29,6 +29,7 @@ import ( "strings" "sync" "time" + "unsafe" "github.com/klauspost/compress/gzip" config_util "github.com/prometheus/common/config" @@ -61,6 +62,15 @@ var AlignScrapeTimestamps = true var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName) +var _ FailureLogger = (*logging.JSONFileLogger)(nil) + +// FailureLogger is an interface that can be used to log all failed +// scrapes. +type FailureLogger interface { + slog.Handler + io.Closer +} + // scrapePool manages scrapes for sets of targets. 
type scrapePool struct { appendable storage.Appendable @@ -90,7 +100,7 @@ type scrapePool struct { metrics *scrapeMetrics - scrapeFailureLogger *logging.JSONFileLogger + scrapeFailureLogger FailureLogger scrapeFailureLoggerMtx sync.RWMutex } @@ -223,11 +233,11 @@ func (sp *scrapePool) DroppedTargetsCount() int { return sp.droppedTargetsCount } -func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) { +func (sp *scrapePool) SetScrapeFailureLogger(l FailureLogger) { sp.scrapeFailureLoggerMtx.Lock() defer sp.scrapeFailureLoggerMtx.Unlock() if l != nil { - l.With("job_name", sp.config.JobName) + l = slog.New(l).With("job_name", sp.config.JobName).Handler().(FailureLogger) } sp.scrapeFailureLogger = l @@ -238,7 +248,7 @@ func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) { } } -func (sp *scrapePool) getScrapeFailureLogger() *logging.JSONFileLogger { +func (sp *scrapePool) getScrapeFailureLogger() FailureLogger { sp.scrapeFailureLoggerMtx.RLock() defer sp.scrapeFailureLoggerMtx.RUnlock() return sp.scrapeFailureLogger @@ -450,7 +460,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { switch { case nonEmpty: all = append(all, t) - case !t.discoveredLabels.IsEmpty(): + default: if sp.config.KeepDroppedTargets == 0 || uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets { sp.droppedTargets = append(sp.droppedTargets, t) } @@ -553,9 +563,9 @@ func (sp *scrapePool) sync(targets []*Target) { if _, ok := uniqueLoops[hash]; !ok { uniqueLoops[hash] = nil } - // Need to keep the most updated labels information - // for displaying it in the Service Discovery web page. - sp.activeTargets[hash].SetDiscoveredLabels(t.DiscoveredLabels()) + // Need to keep the most updated ScrapeConfig for + // displaying labels in the Service Discovery web page. + sp.activeTargets[hash].SetScrapeConfig(sp.config, t.tLabels, t.tgLabels) } } @@ -791,7 +801,7 @@ func acceptEncodingHeader(enableCompression bool) string { return "identity" } -var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var UserAgent = version.PrometheusUserAgent() func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { if s.req == nil { @@ -865,7 +875,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w type loop interface { run(errc chan<- error) setForcedError(err error) - setScrapeFailureLogger(*logging.JSONFileLogger) + setScrapeFailureLogger(FailureLogger) stop() getCache() *scrapeCache disableEndOfRunStalenessMarkers() @@ -881,7 +891,7 @@ type cacheEntry struct { type scrapeLoop struct { scraper scraper l *slog.Logger - scrapeFailureLogger *logging.JSONFileLogger + scrapeFailureLogger FailureLogger scrapeFailureLoggerMtx sync.RWMutex cache *scrapeCache lastScrapeSize int @@ -931,6 +941,7 @@ type scrapeLoop struct { // scrapeCache tracks mappings of exposed metric strings to label sets and // storage references. Additionally, it tracks staleness of series between // scrapes. +// The cache is meant to be used for a single target. type scrapeCache struct { iter uint64 // Current scrape iteration. @@ -951,8 +962,10 @@ type scrapeCache struct { seriesCur map[uint64]labels.Labels seriesPrev map[uint64]labels.Labels - metaMtx sync.Mutex - metadata map[string]*metaEntry + // TODO(bwplotka): Consider moving Metadata API to use WAL instead of scrape loop to + // avoid locking (using metadata API can block scraping). + metaMtx sync.Mutex // Mutex is needed because the metadata API reads this while scrapes update it.
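Why the slog.New(l).With(...).Handler().(FailureLogger) round-trip above is safe: the concrete handler's WithAttrs must itself return a Close-capable handler, which the vendored logging.JSONFileLogger does. A sketch with an illustrative handler type showing the same property:

package main

import (
	"io"
	"log/slog"
	"os"
)

// FailureLogger mirrors the interface above.
type FailureLogger interface {
	slog.Handler
	io.Closer
}

// closableHandler is an illustrative implementation; in the real code the
// vendored logging.JSONFileLogger plays this role.
type closableHandler struct {
	slog.Handler
	c io.Closer
}

func (h *closableHandler) Close() error { return h.c.Close() }

// WithAttrs returns the same Close-capable type; otherwise the
// .Handler().(FailureLogger) assertion in SetScrapeFailureLogger would panic.
func (h *closableHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &closableHandler{Handler: h.Handler.WithAttrs(attrs), c: h.c}
}

func main() {
	f, err := os.CreateTemp("", "scrape-failures-*.log")
	if err != nil {
		panic(err)
	}
	var l FailureLogger = &closableHandler{Handler: slog.NewJSONHandler(f, nil), c: f}

	// The same dance the scrape pool does to bind a job_name attribute.
	l = slog.New(l).With("job_name", "node").Handler().(FailureLogger)

	slog.New(l).Error("scrape failed")
	_ = l.Close()
}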
+ metadata map[string]*metaEntry // metadata by metric family name. metrics *scrapeMetrics } @@ -1078,73 +1091,79 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) { } } -func (c *scrapeCache) setType(metric []byte, t model.MetricType) { +func yoloString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +func (c *scrapeCache) setType(mfName []byte, t model.MetricType) ([]byte, *metaEntry) { c.metaMtx.Lock() + defer c.metaMtx.Unlock() - e, ok := c.metadata[string(metric)] + e, ok := c.metadata[yoloString(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} - c.metadata[string(metric)] = e + c.metadata[string(mfName)] = e } if e.Type != t { e.Type = t e.lastIterChange = c.iter } e.lastIter = c.iter - - c.metaMtx.Unlock() + return mfName, e } -func (c *scrapeCache) setHelp(metric, help []byte) { +func (c *scrapeCache) setHelp(mfName, help []byte) ([]byte, *metaEntry) { c.metaMtx.Lock() + defer c.metaMtx.Unlock() - e, ok := c.metadata[string(metric)] + e, ok := c.metadata[yoloString(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} - c.metadata[string(metric)] = e + c.metadata[string(mfName)] = e } if e.Help != string(help) { e.Help = string(help) e.lastIterChange = c.iter } e.lastIter = c.iter - - c.metaMtx.Unlock() + return mfName, e } -func (c *scrapeCache) setUnit(metric, unit []byte) { +func (c *scrapeCache) setUnit(mfName, unit []byte) ([]byte, *metaEntry) { c.metaMtx.Lock() + defer c.metaMtx.Unlock() - e, ok := c.metadata[string(metric)] + e, ok := c.metadata[yoloString(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} - c.metadata[string(metric)] = e + c.metadata[string(mfName)] = e } if e.Unit != string(unit) { e.Unit = string(unit) e.lastIterChange = c.iter } e.lastIter = c.iter - - c.metaMtx.Unlock() + return mfName, e } -func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) { +// GetMetadata returns metadata given the metric family name. +func (c *scrapeCache) GetMetadata(mfName string) (MetricMetadata, bool) { c.metaMtx.Lock() defer c.metaMtx.Unlock() - m, ok := c.metadata[metric] + m, ok := c.metadata[mfName] if !ok { return MetricMetadata{}, false } return MetricMetadata{ - Metric: metric, - Type: m.Type, - Help: m.Help, - Unit: m.Unit, + MetricFamily: mfName, + Type: m.Type, + Help: m.Help, + Unit: m.Unit, }, true } +// ListMetadata lists metadata. func (c *scrapeCache) ListMetadata() []MetricMetadata { c.metaMtx.Lock() defer c.metaMtx.Unlock() @@ -1153,16 +1172,16 @@ func (c *scrapeCache) ListMetadata() []MetricMetadata { for m, e := range c.metadata { res = append(res, MetricMetadata{ - Metric: m, - Type: e.Type, - Help: e.Help, - Unit: e.Unit, + MetricFamily: m, + Type: e.Type, + Help: e.Help, + Unit: e.Unit, }) } return res } -// MetadataSize returns the size of the metadata cache. +// SizeMetadata returns the size of the metadata cache. func (c *scrapeCache) SizeMetadata() (s int) { c.metaMtx.Lock() defer c.metaMtx.Unlock() @@ -1173,7 +1192,7 @@ func (c *scrapeCache) SizeMetadata() (s int) { return s } -// MetadataLen returns the number of metadata entries in the cache. +// LengthMetadata returns the number of metadata entries in the cache. 
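yoloString above is the usual zero-copy []byte-to-string conversion: it is valid only while the backing bytes are neither mutated nor retained, which holds in the cache because the string is used solely for the map lookup, and stored keys are created with a copying string() conversion. A standalone demonstration:

package main

import (
	"fmt"
	"unsafe"
)

func yoloString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

func main() {
	cache := map[string]int{"http_requests_total": 42}

	key := []byte("http_requests_total")
	// Lookup without copying: safe because the string is not retained
	// beyond the index expression.
	if v, ok := cache[yoloString(key)]; ok {
		fmt.Println("hit:", v)
	}

	// Insertion still uses string(key): that conversion copies, so the
	// stored map key cannot be corrupted by later writes to the slice.
	cache[string(key)] = 43
}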
func (c *scrapeCache) LengthMetadata() int { c.metaMtx.Lock() defer c.metaMtx.Unlock() @@ -1272,11 +1291,11 @@ func newScrapeLoop(ctx context.Context, return sl } -func (sl *scrapeLoop) setScrapeFailureLogger(l *logging.JSONFileLogger) { +func (sl *scrapeLoop) setScrapeFailureLogger(l FailureLogger) { sl.scrapeFailureLoggerMtx.Lock() defer sl.scrapeFailureLoggerMtx.Unlock() if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil { - l.With("target", ts.String()) + l = slog.New(l).With("target", ts.String()).Handler().(FailureLogger) } sl.scrapeFailureLogger = l } @@ -1426,7 +1445,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er sl.l.Debug("Scrape failed", "err", scrapeErr) sl.scrapeFailureLoggerMtx.RLock() if sl.scrapeFailureLogger != nil { - sl.scrapeFailureLogger.Log(context.Background(), slog.LevelError, scrapeErr.Error()) + slog.New(sl.scrapeFailureLogger).Error(scrapeErr.Error()) } sl.scrapeFailureLoggerMtx.RUnlock() if errc != nil { @@ -1607,39 +1626,17 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ) } var ( - appErrs = appendErrors{} - sampleLimitErr error - bucketLimitErr error - lset labels.Labels // escapes to heap so hoisted out of loop - e exemplar.Exemplar // escapes to heap so hoisted out of loop - meta metadata.Metadata - metadataChanged bool + appErrs = appendErrors{} + sampleLimitErr error + bucketLimitErr error + lset labels.Labels // escapes to heap so hoisted out of loop + e exemplar.Exemplar // escapes to heap so hoisted out of loop + lastMeta *metaEntry + lastMFName []byte ) exemplars := make([]exemplar.Exemplar, 0, 1) - // updateMetadata updates the current iteration's metadata object and the - // metadataChanged value if we have metadata in the scrape cache AND the - // labelset is for a new series or the metadata for this series has just - // changed. It returns a boolean based on whether the metadata was updated. - updateMetadata := func(lset labels.Labels, isNewSeries bool) bool { - if !sl.appendMetadataToWAL { - return false - } - - sl.cache.metaMtx.Lock() - defer sl.cache.metaMtx.Unlock() - metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)] - if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) { - metadataChanged = true - meta.Type = metaEntry.Type - meta.Unit = metaEntry.Unit - meta.Help = metaEntry.Help - return true - } - return false - } - // Take an appender with limits. app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema) @@ -1669,14 +1666,18 @@ loop: break } switch et { + // TODO(bwplotka): Consider changing parser to give metadata at once instead of type, help and unit in separation, ideally on `Series()/Histogram() + // otherwise we can expose metadata without series on metadata API. case textparse.EntryType: - sl.cache.setType(p.Type()) + // TODO(bwplotka): Build meta entry directly instead of locking and updating the map. This will + // allow to properly update metadata when e.g unit was added, then removed; + lastMFName, lastMeta = sl.cache.setType(p.Type()) continue case textparse.EntryHelp: - sl.cache.setHelp(p.Help()) + lastMFName, lastMeta = sl.cache.setHelp(p.Help()) continue case textparse.EntryUnit: - sl.cache.setUnit(p.Unit()) + lastMFName, lastMeta = sl.cache.setUnit(p.Unit()) continue case textparse.EntryComment: continue @@ -1699,26 +1700,19 @@ loop: t = *parsedTimestamp } - // Zero metadata out for current iteration until it's resolved. 
- meta = metadata.Metadata{} - metadataChanged = false - if sl.cache.getDropped(met) { continue } - ce, ok, seriesAlreadyScraped := sl.cache.get(met) + ce, seriesCached, seriesAlreadyScraped := sl.cache.get(met) var ( ref storage.SeriesRef hash uint64 ) - if ok { + if seriesCached { ref = ce.ref lset = ce.lset hash = ce.hash - - // Update metadata only if it changed in the current iteration. - updateMetadata(lset, false) } else { p.Metric(&lset) hash = lset.Hash() @@ -1747,9 +1741,6 @@ loop: sl.metrics.targetScrapePoolExceededLabelLimits.Inc() break loop } - - // Append metadata for new series if they were present. - updateMetadata(lset, true) } if seriesAlreadyScraped && parsedTimestamp == nil { @@ -1799,7 +1790,7 @@ loop: break loop } - if !ok { + if !seriesCached { if parsedTimestamp == nil || sl.trackTimestampsStaleness { // Bypass staleness logic if there is an explicit timestamp. sl.cache.trackStaleness(hash, lset) @@ -1857,10 +1848,18 @@ loop: sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) } - if sl.appendMetadataToWAL && metadataChanged { - if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil { - // No need to fail the scrape on errors appending metadata. - sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) + if sl.appendMetadataToWAL && lastMeta != nil { + // Is it new series OR did metadata change for this family? + if !seriesCached || lastMeta.lastIterChange == sl.cache.iter { + // In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName. + // However, optional TYPE etc metadata and broken OM text can break this, detect those cases here. + // TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. CT and NHCB parsing). + if isSeriesPartOfFamily(lset.Get(labels.MetricName), lastMFName, lastMeta.Type) { + if _, merr := app.UpdateMetadata(ref, lset, lastMeta.Metadata); merr != nil { + // No need to fail the scrape on errors appending metadata. + sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", lastMeta.Metadata), "err", merr) + } + } } } } @@ -1896,6 +1895,71 @@ loop: return } +func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) bool { + mfNameStr := yoloString(mfName) + if !strings.HasPrefix(mName, mfNameStr) { // Fast path. + return false + } + + var ( + gotMFName string + ok bool + ) + switch typ { + case model.MetricTypeCounter: + // Prometheus allows _total, cut it from mf name to support this case. 
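The suffix matching that isSeriesPartOfFamily performs below, condensed to the classic-histogram case; the function and names here are illustrative, not the vendored code.

package main

import (
	"fmt"
	"strings"
)

// histogramFamily reports whether series name s belongs to the classic
// histogram family named mf (i.e. mf, mf_bucket, mf_sum, or mf_count).
func histogramFamily(s, mf string) bool {
	if !strings.HasPrefix(s, mf) { // Fast path, as in the real code.
		return false
	}
	for _, suffix := range []string{"_bucket", "_sum", "_count"} {
		if got, ok := strings.CutSuffix(s, suffix); ok {
			return got == mf
		}
	}
	return s == mf
}

func main() {
	fmt.Println(histogramFamily("http_request_duration_seconds_bucket", "http_request_duration_seconds")) // true
	fmt.Println(histogramFamily("http_request_duration_seconds_max", "http_request_duration_seconds"))    // false
}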
+ mfNameStr, _ = strings.CutSuffix(mfNameStr, "_total") + + gotMFName, ok = strings.CutSuffix(mName, "_total") + if !ok { + gotMFName = mName + } + case model.MetricTypeHistogram: + gotMFName, ok = strings.CutSuffix(mName, "_bucket") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_sum") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_count") + if !ok { + gotMFName = mName + } + } + } + case model.MetricTypeGaugeHistogram: + gotMFName, ok = strings.CutSuffix(mName, "_bucket") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_gsum") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_gcount") + if !ok { + gotMFName = mName + } + } + } + case model.MetricTypeSummary: + gotMFName, ok = strings.CutSuffix(mName, "_sum") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_count") + if !ok { + gotMFName = mName + } + } + case model.MetricTypeInfo: + // Technically prometheus text does not support info type, but we might + // accidentally allow info type in prom parse, so support metric family names + // with the _info explicitly too. + mfNameStr, _ = strings.CutSuffix(mfNameStr, "_info") + + gotMFName, ok = strings.CutSuffix(mName, "_info") + if !ok { + gotMFName = mName + } + default: + gotMFName = mName + } + return mfNameStr == gotMFName +} + // Adds samples to the appender, checking the error, and then returns the # of samples added, // whether the caller should continue to process more samples, and any sample or bucket limit errors. func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { @@ -1934,17 +1998,80 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke } } +// reportSample represents automatically generated timeseries documented in +// https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series +type reportSample struct { + metadata.Metadata + name []byte +} + // The constants are suffixed with the invalid \xff unicode rune to avoid collisions // with scraped metrics in the cache. var ( - scrapeHealthMetricName = []byte("up" + "\xff") - scrapeDurationMetricName = []byte("scrape_duration_seconds" + "\xff") - scrapeSamplesMetricName = []byte("scrape_samples_scraped" + "\xff") - samplesPostRelabelMetricName = []byte("scrape_samples_post_metric_relabeling" + "\xff") - scrapeSeriesAddedMetricName = []byte("scrape_series_added" + "\xff") - scrapeTimeoutMetricName = []byte("scrape_timeout_seconds" + "\xff") - scrapeSampleLimitMetricName = []byte("scrape_sample_limit" + "\xff") - scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff") + scrapeHealthMetric = reportSample{ + name: []byte("up" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Health of the scrape target. 
1 means the target is healthy, 0 if the scrape failed.", + Unit: "targets", + }, + } + scrapeDurationMetric = reportSample{ + name: []byte("scrape_duration_seconds" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Duration of the last scrape in seconds.", + Unit: "seconds", + }, + } + scrapeSamplesMetric = reportSample{ + name: []byte("scrape_samples_scraped" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Number of samples last scraped.", + Unit: "samples", + }, + } + samplesPostRelabelMetric = reportSample{ + name: []byte("scrape_samples_post_metric_relabeling" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Number of samples remaining after metric relabeling was applied.", + Unit: "samples", + }, + } + scrapeSeriesAddedMetric = reportSample{ + name: []byte("scrape_series_added" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Number of series in the last scrape.", + Unit: "series", + }, + } + scrapeTimeoutMetric = reportSample{ + name: []byte("scrape_timeout_seconds" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "The configured scrape timeout for a target.", + Unit: "seconds", + }, + } + scrapeSampleLimitMetric = reportSample{ + name: []byte("scrape_sample_limit" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "The configured sample limit for a target. Returns zero if there is no limit configured.", + Unit: "samples", + }, + } + scrapeBodySizeBytesMetric = reportSample{ + name: []byte("scrape_body_size_bytes" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "The uncompressed size of the last scrape response, if successful. 
Scrapes failing because body_size_limit is exceeded report -1, other scrape failures report 0.", + Unit: "bytes", + }, + } ) func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { @@ -1958,29 +2085,29 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim } b := labels.NewBuilderWithSymbolTable(sl.symbolTable) - if err = sl.addReportSample(app, scrapeHealthMetricName, ts, health, b); err != nil { + if err = sl.addReportSample(app, scrapeHealthMetric, ts, health, b); err != nil { return } - if err = sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds(), b); err != nil { + if err = sl.addReportSample(app, scrapeDurationMetric, ts, duration.Seconds(), b); err != nil { return } - if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped), b); err != nil { + if err = sl.addReportSample(app, scrapeSamplesMetric, ts, float64(scraped), b); err != nil { return } - if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(added), b); err != nil { + if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, float64(added), b); err != nil { return } - if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded), b); err != nil { + if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, float64(seriesAdded), b); err != nil { return } if sl.reportExtraMetrics { - if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds(), b); err != nil { + if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b); err != nil { return } - if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit), b); err != nil { + if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b); err != nil { return } - if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, float64(bytes), b); err != nil { + if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, float64(bytes), b); err != nil { return } } @@ -1993,37 +2120,37 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er stale := math.Float64frombits(value.StaleNaN) b := labels.NewBuilder(labels.EmptyLabels()) - if err = sl.addReportSample(app, scrapeHealthMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeHealthMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeDurationMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeDurationMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeSamplesMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, stale, b); err != nil { return } if sl.reportExtraMetrics { - if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale, b); 
err != nil { + if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, stale, b); err != nil { return } } return } -func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error { - ce, ok, _ := sl.cache.get(s) +func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t int64, v float64, b *labels.Builder) error { + ce, ok, _ := sl.cache.get(s.name) var ref storage.SeriesRef var lset labels.Labels if ok { @@ -2034,7 +2161,7 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v // with scraped metrics in the cache. // We have to drop it when building the actual metric. b.Reset(labels.EmptyLabels()) - b.Set(labels.MetricName, string(s[:len(s)-1])) + b.Set(labels.MetricName, string(s.name[:len(s.name)-1])) lset = sl.reportSampleMutator(b.Labels()) } @@ -2042,7 +2169,13 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v switch { case err == nil: if !ok { - sl.cache.addRef(s, ref, lset, lset.Hash()) + sl.cache.addRef(s.name, ref, lset, lset.Hash()) + // We only need to add metadata once a scrape target appears. + if sl.appendMetadataToWAL { + if _, merr := app.UpdateMetadata(ref, lset, s.Metadata); merr != nil { + sl.l.Debug("Error when appending metadata in addReportSample", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", s.Metadata), "err", merr) + } + } } return nil case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index 06d4737ff90..4f576504f09 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -45,12 +45,12 @@ const ( // Target refers to a singular HTTP or HTTPS endpoint. type Target struct { - // Labels before any processing. - discoveredLabels labels.Labels // Any labels that are added to this target and its metrics. labels labels.Labels - // Additional URL parameters that are part of the target URL. - params url.Values + // ScrapeConfig used to create this target. + scrapeConfig *config.ScrapeConfig + // Target and TargetGroup labels used to create this target. + tLabels, tgLabels model.LabelSet mtx sync.RWMutex lastError error @@ -61,12 +61,13 @@ type Target struct { } // NewTarget creates a reasonably configured target for querying. -func NewTarget(labels, discoveredLabels labels.Labels, params url.Values) *Target { +func NewTarget(labels labels.Labels, scrapeConfig *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) *Target { return &Target{ - labels: labels, - discoveredLabels: discoveredLabels, - params: params, - health: HealthUnknown, + labels: labels, + tLabels: tLabels, + tgLabels: tgLabels, + scrapeConfig: scrapeConfig, + health: HealthUnknown, } } @@ -77,17 +78,17 @@ func (t *Target) String() string { // MetricMetadataStore represents a storage for metadata. type MetricMetadataStore interface { ListMetadata() []MetricMetadata - GetMetadata(metric string) (MetricMetadata, bool) + GetMetadata(mfName string) (MetricMetadata, bool) SizeMetadata() int LengthMetadata() int } -// MetricMetadata is a piece of metadata for a metric. 
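// Editorial aside (not part of the patch): the rename below keys metadata by
// metric family rather than by individual series. A toy in-memory store
// matching the lookup shape, for illustration only (the Size/Length methods of
// the interface are omitted):
//
//	type toyStore struct {
//		byFamily map[string]MetricMetadata
//	}
//
//	func (s *toyStore) GetMetadata(mfName string) (MetricMetadata, bool) {
//		m, ok := s.byFamily[mfName]
//		return m, ok
//	}
//
// Callers resolve a series such as "http_requests_total" to its family
// ("http_requests") before looking it up.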
+// MetricMetadata is a piece of metadata for a metric family. type MetricMetadata struct { - Metric string - Type model.MetricType - Help string - Unit string + MetricFamily string + Type model.MetricType + Help string + Unit string } func (t *Target) ListMetadata() []MetricMetadata { @@ -123,14 +124,14 @@ func (t *Target) LengthMetadata() int { } // GetMetadata returns type and help metadata for the given metric. -func (t *Target) GetMetadata(metric string) (MetricMetadata, bool) { +func (t *Target) GetMetadata(mfName string) (MetricMetadata, bool) { t.mtx.RLock() defer t.mtx.RUnlock() if t.metadata == nil { return MetricMetadata{}, false } - return t.metadata.GetMetadata(metric) + return t.metadata.GetMetadata(mfName) } func (t *Target) SetMetadataStore(s MetricMetadataStore) { @@ -168,11 +169,11 @@ func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration } // Labels returns a copy of the set of all public labels of the target. -func (t *Target) Labels(b *labels.ScratchBuilder) labels.Labels { - b.Reset() +func (t *Target) Labels(b *labels.Builder) labels.Labels { + b.Reset(labels.EmptyLabels()) t.labels.Range(func(l labels.Label) { if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { - b.Add(l.Name, l.Value) + b.Set(l.Name, l.Value) } }) return b.Labels() @@ -188,24 +189,31 @@ func (t *Target) LabelsRange(f func(l labels.Label)) { } // DiscoveredLabels returns a copy of the target's labels before any processing. -func (t *Target) DiscoveredLabels() labels.Labels { +func (t *Target) DiscoveredLabels(lb *labels.Builder) labels.Labels { t.mtx.Lock() - defer t.mtx.Unlock() - return t.discoveredLabels.Copy() + cfg, tLabels, tgLabels := t.scrapeConfig, t.tLabels, t.tgLabels + t.mtx.Unlock() + PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels) + return lb.Labels() } -// SetDiscoveredLabels sets new DiscoveredLabels. -func (t *Target) SetDiscoveredLabels(l labels.Labels) { +// SetScrapeConfig sets new ScrapeConfig. +func (t *Target) SetScrapeConfig(scrapeConfig *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) { t.mtx.Lock() defer t.mtx.Unlock() - t.discoveredLabels = l + t.scrapeConfig = scrapeConfig + t.tLabels = tLabels + t.tgLabels = tgLabels } // URL returns a copy of the target's URL. 
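// Editorial aside (not part of the patch): DiscoveredLabels (above) is now
// rebuilt on demand from the stored scrape config plus the raw target/group
// label sets instead of being cached. The merge rule is that per-target labels
// win over target-group labels; a standalone sketch of just that precedence,
// assuming only prometheus/common/model:
//
//	func mergeDiscovered(tLabels, tgLabels model.LabelSet) model.LabelSet {
//		out := make(model.LabelSet, len(tLabels)+len(tgLabels))
//		for ln, lv := range tgLabels {
//			out[ln] = lv // group-level defaults first
//		}
//		for ln, lv := range tLabels {
//			out[ln] = lv // per-target labels override group labels
//		}
//		return out
//	}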
func (t *Target) URL() *url.URL { + t.mtx.Lock() + configParams := t.scrapeConfig.Params + t.mtx.Unlock() params := url.Values{} - for k, v := range t.params { + for k, v := range configParams { params[k] = make([]string, len(v)) copy(params[k], v) } @@ -287,12 +295,12 @@ func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Durati intervalLabel := t.labels.Get(model.ScrapeIntervalLabel) interval, err := model.ParseDuration(intervalLabel) if err != nil { - return defaultInterval, defaultDuration, fmt.Errorf("Error parsing interval label %q: %w", intervalLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("error parsing interval label %q: %w", intervalLabel, err) } timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel) timeout, err := model.ParseDuration(timeoutLabel) if err != nil { - return defaultInterval, defaultDuration, fmt.Errorf("Error parsing timeout label %q: %w", timeoutLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("error parsing timeout label %q: %w", timeoutLabel, err) } return time.Duration(interval), time.Duration(timeout), nil @@ -420,10 +428,19 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels return ref, nil } -// PopulateLabels builds a label set from the given label set and scrape configuration. -// It returns a label set before relabeling was applied as the second return value. -// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { +// PopulateDiscoveredLabels sets base labels on lb from target and group labels and scrape configuration, before relabeling. +func PopulateDiscoveredLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) { + lb.Reset(labels.EmptyLabels()) + + for ln, lv := range tLabels { + lb.Set(string(ln), string(lv)) + } + for ln, lv := range tgLabels { + if _, ok := tLabels[ln]; !ok { + lb.Set(string(ln), string(lv)) + } + } + // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -444,44 +461,49 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig lab lb.Set(name, v[0]) } } +} - preRelabelLabels := lb.Labels() +// PopulateLabels builds labels from target and group labels and scrape configuration, +// performs defined relabeling, checks validity, and adds Prometheus standard labels such as 'instance'. +// A return of empty labels and nil error means the target was dropped by relabeling. +func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) (res labels.Labels, err error) { + PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels) keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) // Check if the target was dropped. 
if !keep { - return labels.EmptyLabels(), preRelabelLabels, nil + return labels.EmptyLabels(), nil } if v := lb.Get(model.AddressLabel); v == "" { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") + return labels.EmptyLabels(), errors.New("no address") } addr := lb.Get(model.AddressLabel) if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), err + return labels.EmptyLabels(), err } interval := lb.Get(model.ScrapeIntervalLabel) intervalDuration, err := model.ParseDuration(interval) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) + return labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) } if time.Duration(intervalDuration) == 0 { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0") + return labels.EmptyLabels(), errors.New("scrape interval cannot be 0") } timeout := lb.Get(model.ScrapeTimeoutLabel) timeoutDuration, err := model.ParseDuration(timeout) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) + return labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) } if time.Duration(timeoutDuration) == 0 { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") + return labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") } if timeoutDuration > intervalDuration { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) + return labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) } // Meta labels are deleted after relabelling. Other internal labels propagate to @@ -506,9 +528,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig lab return nil }) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), err + return labels.EmptyLabels(), err } - return res, preRelabelLabels, nil + return res, nil } // TargetsFromGroup builds targets based on the given TargetGroup and config. 
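Worth noting for callers of the reworked PopulateLabels above: a target dropped by relabeling is now reported as empty labels with a nil error, so drops and misconfigurations are distinguished without the old second return value. A rough caller sketch under that contract (names are hypothetical):

	lset, err := PopulateLabels(lb, cfg, tLabels, tgLabels)
	if err != nil {
		return fmt.Errorf("populate labels: %w", err) // bad address, zero interval, ...
	}
	if lset.IsEmpty() {
		return nil // dropped by relabeling; not an error
	}
	// ... build the target from lset ...

TargetsFromGroup in the next hunk deliberately keeps even dropped targets (with empty labels), so they can still be surfaced as dropped.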
@@ -516,24 +538,12 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets [ targets = targets[:0] failures := []error{} - for i, tlset := range tg.Targets { - lb.Reset(labels.EmptyLabels()) - - for ln, lv := range tlset { - lb.Set(string(ln), string(lv)) - } - for ln, lv := range tg.Labels { - if _, ok := tlset[ln]; !ok { - lb.Set(string(ln), string(lv)) - } - } - - lset, origLabels, err := PopulateLabels(lb, cfg) + for i, tLabels := range tg.Targets { + lset, err := PopulateLabels(lb, cfg, tLabels, tg.Labels) if err != nil { failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) - } - if !lset.IsEmpty() || !origLabels.IsEmpty() { - targets = append(targets, NewTarget(lset, origLabels, cfg.Params)) + } else { + targets = append(targets, NewTarget(lset, cfg, tLabels, tg.Labels)) } } return targets, failures diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index ad766be9bf4..aadf15307c0 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -66,7 +66,7 @@ const ( var ( // UserAgent represents Prometheus version to use for user agent header. - UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) + UserAgent = version.PrometheusUserAgent() remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{ config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec. @@ -81,8 +81,8 @@ var ( remoteReadQueriesTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, - Subsystem: subsystem, - Name: "read_queries_total", + Subsystem: "remote_read_client", + Name: "queries_total", Help: "The total number of remote read queries.", }, []string{remoteName, endpoint, "response_type", "code"}, @@ -90,8 +90,8 @@ var ( remoteReadQueries = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, - Subsystem: subsystem, - Name: "remote_read_queries", + Subsystem: "remote_read_client", + Name: "queries", Help: "The number of in-flight remote read queries.", }, []string{remoteName, endpoint}, @@ -99,8 +99,8 @@ var ( remoteReadQueryDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: namespace, - Subsystem: subsystem, - Name: "read_request_duration_seconds", + Subsystem: "remote_read_client", + Name: "request_duration_seconds", Help: "Histogram of the latency for remote read requests. Note that for streamed responses this is only the duration of the initial call and does not include the processing of the stream.", Buckets: append(prometheus.DefBuckets, 25, 60), NativeHistogramBucketFactor: 1.1, diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go index 9306dcb4c28..d7f376c96a8 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -38,7 +38,7 @@ type Watchable interface { type noopScrapeManager struct{} func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { - return nil, errors.New("Scrape manager not ready") + return nil, errors.New("scrape manager not ready") } // MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. 
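For reference, client_golang composes the full metric name as namespace_subsystem_name, so the renamed remote-read client metrics above are exported as prometheus_remote_read_client_queries_total, prometheus_remote_read_client_queries, and prometheus_remote_read_client_request_duration_seconds (assuming the package's namespace constant is "prometheus"). A minimal sketch of the composition, with label names assumed to mirror the remoteName/endpoint constants:

	queries := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "prometheus",
		Subsystem: "remote_read_client",
		Name:      "queries_total",
		Help:      "The total number of remote read queries.",
	}, []string{"remote_name", "url", "response_type", "code"})
	// Exposed as prometheus_remote_read_client_queries_total{remote_name=...,url=...,...}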
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go deleted file mode 100644 index cb9257d0737..00000000000 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/golang/go/blob/f2d118fd5f7e872804a5825ce29797f81a28b0fa/src/strings/strings.go -// Provenance-includes-license: BSD-3-Clause -// Provenance-includes-copyright: Copyright The Go Authors. - -package prometheus - -import "strings" - -// fieldsFunc is a copy of strings.FieldsFunc from the Go standard library, -// but it also returns the separators as part of the result. -func fieldsFunc(s string, f func(rune) bool) ([]string, []string) { - // A span is used to record a slice of s of the form s[start:end]. - // The start index is inclusive and the end index is exclusive. - type span struct { - start int - end int - } - spans := make([]span, 0, 32) - separators := make([]string, 0, 32) - - // Find the field start and end indices. - // Doing this in a separate pass (rather than slicing the string s - // and collecting the result substrings right away) is significantly - // more efficient, possibly due to cache effects. - start := -1 // valid span start if >= 0 - for end, rune := range s { - if f(rune) { - if start >= 0 { - spans = append(spans, span{start, end}) - // Set start to a negative value. - // Note: using -1 here consistently and reproducibly - // slows down this code by a several percent on amd64. - start = ^start - separators = append(separators, string(s[end])) - } - } else { - if start < 0 { - start = end - } - } - } - - // Last field might end at EOF. - if start >= 0 { - spans = append(spans, span{start, len(s)}) - } - - // Create strings from recorded field indices. - a := make([]string, len(spans)) - for i, span := range spans { - a[i] = s[span.start:span.end] - } - - return a, separators -} - -// join is a copy of strings.Join from the Go standard library, -// but it also accepts a slice of separators to join the elements with. -// If the slice of separators is shorter than the slice of elements, use a default value. -// We also don't check for integer overflow. 
-func join(elems []string, separators []string, def string) string { - switch len(elems) { - case 0: - return "" - case 1: - return elems[0] - } - - var n int - var sep string - sepLen := len(separators) - for i, elem := range elems { - if i >= sepLen { - sep = def - } else { - sep = separators[i] - } - n += len(sep) + len(elem) - } - - var b strings.Builder - b.Grow(n) - b.WriteString(elems[0]) - for i, s := range elems[1:] { - if i >= sepLen { - sep = def - } else { - sep = separators[i] - } - b.WriteString(sep) - b.WriteString(s) - } - return b.String() -} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go similarity index 54% rename from vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go rename to vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go index ab722d5d80a..8b5ea2a0464 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go @@ -78,7 +78,7 @@ var perUnitMap = map[string]string{ "y": "year", } -// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric. +// BuildCompliantMetricName builds a Prometheus-compliant metric name for the specified metric. // // Metric name is prefixed with specified namespace and underscore (if any). // Namespace is not cleaned up. Make sure specified namespace follows Prometheus @@ -87,105 +87,49 @@ var perUnitMap = map[string]string{ // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, // https://prometheus.io/docs/practices/naming/#metric-and-label-naming // and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. -func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes, allowUTF8 bool) string { +func BuildCompliantMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { // Full normalization following standard Prometheus naming conventions if addMetricSuffixes { - return normalizeName(metric, namespace, allowUTF8) + return normalizeName(metric, namespace) } - var metricName string - if !allowUTF8 { - // Regexp for metric name characters that should be replaced with _. - invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) - - // Simple case (no full normalization, no units, etc.). - metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { - return invalidMetricCharRE.MatchString(string(r)) - }), "_") - } else { - metricName = metric.Name() - } + // Simple case (no full normalization, no units, etc.). + metricName := strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { + return invalidMetricCharRE.MatchString(string(r)) + }), "_") // Namespace? if namespace != "" { return namespace + "_" + metricName } - // Metric name starts with a digit and utf8 not allowed? Prefix it with an underscore. - if metricName != "" && unicode.IsDigit(rune(metricName[0])) && !allowUTF8 { + // Metric name starts with a digit? Prefix it with an underscore. 
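// Editorial aside (not part of the patch): without full normalization, the
// simple case above reduces to collapsing every run of invalid characters into
// a single underscore. A self-contained sketch using the same regexp as the
// patch:
//
//	var invalid = regexp.MustCompile(`[^a-zA-Z0-9:_]`)
//
//	func sanitize(name string) string {
//		return strings.Join(strings.FieldsFunc(name, func(r rune) bool {
//			return invalid.MatchString(string(r))
//		}), "_")
//	}
//
//	sanitize("system.io.read/bytes") // "system_io_read_bytes"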
+ if metricName != "" && unicode.IsDigit(rune(metricName[0])) { metricName = "_" + metricName } return metricName } -var nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`) +var ( + nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`) + // Regexp for metric name characters that should be replaced with _. + invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`) + multipleUnderscoresRE = regexp.MustCompile(`__+`) +) // Build a normalized name for the specified metric. -func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { - var nameTokens []string - var separators []string - if !allowUTF8 { - // Split metric name into "tokens" (of supported metric name runes). - // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. - // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - nameTokens = strings.FieldsFunc( - metric.Name(), - func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) }, - ) - } else { - translationFunc := func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } - // Split metric name into "tokens" (of supported metric name runes). - nameTokens, separators = fieldsFunc(metric.Name(), translationFunc) - } - - // Split unit at the '/' if any - unitTokens := strings.SplitN(metric.Unit(), "/", 2) - - // Main unit - // Append if not blank, doesn't contain '{}', and is not present in metric name already - if len(unitTokens) > 0 { - var mainUnitProm, perUnitProm string - mainUnitOTel := strings.TrimSpace(unitTokens[0]) - if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitProm = unitMapGetOrDefault(mainUnitOTel) - if !allowUTF8 { - mainUnitProm = cleanUpUnit(mainUnitProm) - } - if slices.Contains(nameTokens, mainUnitProm) { - mainUnitProm = "" - } - } - - // Per unit - // Append if not blank, doesn't contain '{}', and is not present in metric name already - if len(unitTokens) > 1 && unitTokens[1] != "" { - perUnitOTel := strings.TrimSpace(unitTokens[1]) - if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitProm = perUnitMapGetOrDefault(perUnitOTel) - if !allowUTF8 { - perUnitProm = cleanUpUnit(perUnitProm) - } - } - if perUnitProm != "" { - perUnitProm = "per_" + perUnitProm - if slices.Contains(nameTokens, perUnitProm) { - perUnitProm = "" - } - } - } - - if perUnitProm != "" { - mainUnitProm = strings.TrimSuffix(mainUnitProm, "_") - } +func normalizeName(metric pmetric.Metric, namespace string) string { + // Split metric name into "tokens" (of supported metric name runes). + // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. + // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
+ nameTokens := strings.FieldsFunc( + metric.Name(), + func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) }, + ) - if mainUnitProm != "" { - nameTokens = append(nameTokens, mainUnitProm) - } - if perUnitProm != "" { - nameTokens = append(nameTokens, perUnitProm) - } - } + mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) + nameTokens = addUnitTokens(nameTokens, cleanUpUnit(mainUnitSuffix), cleanUpUnit(perUnitSuffix)) // Append _total for Counters if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { @@ -206,14 +150,8 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri nameTokens = append([]string{namespace}, nameTokens...) } - var normalizedName string - if !allowUTF8 { - // Build the string from the tokens, separated with underscores - normalizedName = strings.Join(nameTokens, "_") - } else { - // Build the string from the tokens + separators. - normalizedName = join(nameTokens, separators, "_") - } + // Build the string from the tokens, separated with underscores + normalizedName := strings.Join(nameTokens, "_") // Metric name cannot start with a digit, so prefix it with "_" in this case if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { @@ -223,71 +161,43 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri return normalizedName } -// TrimPromSuffixes trims type and unit prometheus suffixes from a metric name. -// Following the [OpenTelemetry specs] for converting Prometheus Metric points to OTLP. +// addUnitTokens will add the suffixes to the nameTokens if they are not already present. +// It will also remove trailing underscores from the main suffix to avoid double underscores +// when joining the tokens. // -// [OpenTelemetry specs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata -func TrimPromSuffixes(promName string, metricType pmetric.MetricType, unit string) string { - nameTokens := strings.Split(promName, "_") - if len(nameTokens) == 1 { - return promName +// If the 'per' unit ends with underscore, the underscore will be removed. If the per unit is just +// 'per_', it will be entirely removed. +func addUnitTokens(nameTokens []string, mainUnitSuffix, perUnitSuffix string) []string { + if slices.Contains(nameTokens, mainUnitSuffix) { + mainUnitSuffix = "" } - nameTokens = removeTypeSuffixes(nameTokens, metricType) - nameTokens = removeUnitSuffixes(nameTokens, unit) - - return strings.Join(nameTokens, "_") -} - -func removeTypeSuffixes(tokens []string, metricType pmetric.MetricType) []string { - switch metricType { - case pmetric.MetricTypeSum: - // Only counters are expected to have a type suffix at this point. - // for other types, suffixes are removed during scrape. 
- return removeSuffix(tokens, "total") - default: - return tokens + if perUnitSuffix == "per_" { + perUnitSuffix = "" + } else { + perUnitSuffix = strings.TrimSuffix(perUnitSuffix, "_") + if slices.Contains(nameTokens, perUnitSuffix) { + perUnitSuffix = "" + } } -} - -func removeUnitSuffixes(nameTokens []string, unit string) []string { - l := len(nameTokens) - unitTokens := strings.Split(unit, "_") - lu := len(unitTokens) - if lu == 0 || l <= lu { - return nameTokens + if perUnitSuffix != "" { + mainUnitSuffix = strings.TrimSuffix(mainUnitSuffix, "_") } - suffixed := true - for i := range unitTokens { - if nameTokens[l-i-1] != unitTokens[lu-i-1] { - suffixed = false - break - } + if mainUnitSuffix != "" { + nameTokens = append(nameTokens, mainUnitSuffix) } - - if suffixed { - return nameTokens[:l-lu] + if perUnitSuffix != "" { + nameTokens = append(nameTokens, perUnitSuffix) } - return nameTokens } -func removeSuffix(tokens []string, suffix string) []string { - l := len(tokens) - if tokens[l-1] == suffix { - return tokens[:l-1] - } - - return tokens -} - // cleanUpUnit cleans up unit so it matches model.LabelNameRE. func cleanUpUnit(unit string) string { // Multiple consecutive underscores are replaced with a single underscore. // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - multipleUnderscoresRE := regexp.MustCompile(`__+`) return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( nonMetricNameCharRE.ReplaceAllString(unit, "_"), "_", @@ -322,3 +232,75 @@ func removeItem(slice []string, value string) []string { } return newSlice } + +// BuildMetricName builds a valid metric name but without following Prometheus naming conventions. +// It doesn't do any character transformation, it only prefixes the metric name with the namespace, if any, +// and adds metric type suffixes, e.g. "_total" for counters and unit suffixes. +// +// Differently from BuildCompliantMetricName, it doesn't check for the presence of unit and type suffixes. +// If "addMetricSuffixes" is true, it will add them anyway. +// +// Please use BuildCompliantMetricName for a metric name that follows Prometheus naming conventions. 
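// Editorial aside (not part of the patch): worked examples of the contract
// described above, assuming the translator's standard unit map ("s" maps to
// "seconds", "1" is dimensionless); inputs are hypothetical:
//
//	name "my.delta#counter", unit "s", monotonic sum -> "my.delta#counter_seconds_total"
//	name "queue.size", unit "1", gauge               -> "queue.size_ratio"
//	name "x", namespace "ns", no suffixes            -> "ns_x"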
+func BuildMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { + metricName := metric.Name() + + if namespace != "" { + metricName = namespace + "_" + metricName + } + + if addMetricSuffixes { + mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) + if mainUnitSuffix != "" { + metricName = metricName + "_" + mainUnitSuffix + } + if perUnitSuffix != "" { + metricName = metricName + "_" + perUnitSuffix + } + + // Append _total for Counters + if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { + metricName = metricName + "_total" + } + + // Append _ratio for metrics with unit "1" + // Some OTel receivers improperly use unit "1" for counters of objects + // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions + // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY + // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) + if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge { + metricName = metricName + "_ratio" + } + } + return metricName +} + +// buildUnitSuffixes builds the main and per unit suffixes for the specified unit +// but doesn't do any special character transformation to accommodate Prometheus naming conventions. +// Removing trailing underscores or appending suffixes is done in the caller. +func buildUnitSuffixes(unit string) (mainUnitSuffix, perUnitSuffix string) { + // Split unit at the '/' if any + unitTokens := strings.SplitN(unit, "/", 2) + + if len(unitTokens) > 0 { + // Main unit + // Update if not blank and doesn't contain '{}' + mainUnitOTel := strings.TrimSpace(unitTokens[0]) + if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { + mainUnitSuffix = unitMapGetOrDefault(mainUnitOTel) + } + + // Per unit + // Update if not blank and doesn't contain '{}' + if len(unitTokens) > 1 && unitTokens[1] != "" { + perUnitOTel := strings.TrimSpace(unitTokens[1]) + if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { + perUnitSuffix = perUnitMapGetOrDefault(perUnitOTel) + } + if perUnitSuffix != "" { + perUnitSuffix = "per_" + perUnitSuffix + } + } + } + + return mainUnitSuffix, perUnitSuffix +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 6779c9ed804..1545accf2fb 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -96,7 +96,12 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric continue } - promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) + var promName string + if settings.AllowUTF8 { + promName = prometheustranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + } else { + promName = prometheustranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + } c.metadata = append(c.metadata, prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), MetricFamilyName: promName, diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go 
b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 475c126eff3..b274707bfff 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -550,7 +550,7 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr mm := make([]prompb.MetricMetadata, 0, len(metadata)) for _, entry := range metadata { mm = append(mm, prompb.MetricMetadata{ - MetricFamilyName: entry.Metric, + MetricFamilyName: entry.MetricFamily, Help: entry.Help, Type: prompb.FromMetadataType(entry.Type), Unit: entry.Unit, @@ -1919,12 +1919,17 @@ func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata int for nPending, d := range batch { pendingData[nPending].Samples = pendingData[nPending].Samples[:0] - // todo: should we also safeguard against empty metadata here? if d.metadata != nil { pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type) pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help) - pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Unit) + pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit) nPendingMetadata++ + } else { + // Safeguard against sending garbage in case of not having metadata + // for whatever reason. + pendingData[nPending].Metadata.Type = writev2.Metadata_METRIC_TYPE_UNSPECIFIED + pendingData[nPending].Metadata.HelpRef = 0 + pendingData[nPending].Metadata.UnitRef = 0 } if sendExemplars { @@ -2119,7 +2124,7 @@ func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed [] } return compressed, nil default: - return compressed, fmt.Errorf("Unknown compression scheme [%v]", enc) + return compressed, fmt.Errorf("unknown compression scheme [%v]", enc) } } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go index 8f2945f9740..3e315a6157e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -56,10 +56,10 @@ func NewReadHandler(logger *slog.Logger, r prometheus.Registerer, queryable stor marshalPool: &sync.Pool{}, queries: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "prometheus", - Subsystem: "api", // TODO: changes to storage in Prometheus 3.0. 
- Name: "remote_read_queries", - Help: "The current number of remote read queries being executed or waiting.", + Namespace: namespace, + Subsystem: "remote_read_handler", + Name: "queries", + Help: "The current number of remote read queries that are either in execution or queued on the handler.", }), } if r != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index afb50ef2652..02585539c0b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -38,6 +38,13 @@ import ( writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" + + deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/otel/metric/noop" ) type writeHandler struct { @@ -48,6 +55,8 @@ type writeHandler struct { samplesAppendedWithoutMetadata prometheus.Counter acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{} + + ingestCTZeroSample bool } const maxAheadTime = 10 * time.Minute @@ -57,7 +66,7 @@ const maxAheadTime = 10 * time.Minute // // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. -func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { +func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg, ingestCTZeroSample bool) http.Handler { protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} for _, acc := range acceptedProtoMsgs { protoMsgs[acc] = struct{}{} @@ -78,6 +87,8 @@ func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable Name: "remote_write_without_metadata_appended_samples_total", Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.", }), + + ingestCTZeroSample: ingestCTZeroSample, } return h } @@ -394,6 +405,17 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * var ref storage.SeriesRef // Samples. + if h.ingestCTZeroSample && len(ts.Samples) > 0 && ts.Samples[0].Timestamp != 0 && ts.CreatedTimestamp != 0 { + // CT only needs to be ingested for the first sample, it will be considered + // out of order for the rest. + ref, err = app.AppendCTZeroSample(ref, ls, ts.Samples[0].Timestamp, ts.CreatedTimestamp) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. 
+ h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Samples[0].Timestamp) + } + } for _, s := range ts.Samples { ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) if err == nil { @@ -415,6 +437,17 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Native Histograms. for _, hp := range ts.Histograms { + if h.ingestCTZeroSample && hp.Timestamp != 0 && ts.CreatedTimestamp != 0 { + // Differently from samples, we need to handle CT for each histogram instead of just the first one. + // This is because histograms and float histograms are stored separately, even if they have the same labels. + ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, ts.CreatedTimestamp) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. + h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", hp.Timestamp) + } + } if hp.IsFloatHistogram() { ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram()) } else { @@ -479,56 +512,119 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...) } +// handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. +// It doens't return errors in case of out of order CT. +func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) { + var err error + if hist.IsFloatHistogram() { + ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, nil, hist.ToFloatHistogram()) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, hist.ToIntHistogram(), nil) + } + return ref, err +} + +type OTLPOptions struct { + // Convert delta samples to their cumulative equivalent by aggregating in-memory + ConvertDelta bool +} + // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. 
-func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { - rwHandler := &writeHandler{ - logger: logger, - appendable: appendable, +func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { + ex := &rwExporter{ + writeHandler: &writeHandler{ + logger: logger, + appendable: appendable, + }, + config: configFunc, } - return &otlpWriteHandler{ - logger: logger, - rwHandler: rwHandler, - configFunc: configFunc, + wh := &otlpWriteHandler{logger: logger, cumul: ex} + + if opts.ConvertDelta { + fac := deltatocumulative.NewFactory() + set := processor.Settings{TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()}} + d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.cumul) + if err != nil { + // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor], + // which only errors if: + // - cfg.(type) != *Config + // - telemetry.New fails due to bad set.TelemetrySettings + // + // both cannot be the case, as we pass a valid *Config and valid TelemetrySettings. + // as such, we assume this error to never occur. + // if it is, our assumptions are broken in which case a panic seems acceptable. + panic(err) + } + if err := d2c.Start(context.Background(), nil); err != nil { + // deltatocumulative does not error on start. see above for panic reasoning + panic(err) + } + wh.delta = d2c } -} -type otlpWriteHandler struct { - logger *slog.Logger - rwHandler *writeHandler - configFunc func() config.Config + return wh } -func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - req, err := DecodeOTLPWriteRequest(r) - if err != nil { - h.logger.Error("Error decoding remote write request", "err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } +type rwExporter struct { + *writeHandler + config func() config.Config +} - otlpCfg := h.configFunc().OTLPConfig +func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + otlpCfg := rw.config().OTLPConfig converter := otlptranslator.NewPrometheusConverter() - annots, err := converter.FromMetrics(r.Context(), req.Metrics(), otlptranslator.Settings{ + annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ AddMetricSuffixes: true, AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, }) if err != nil { - h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) + rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) } ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { - h.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) + rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) } - err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ + err = rw.write(ctx, &prompb.WriteRequest{ Timeseries: converter.TimeSeries(), Metadata: converter.Metadata(), }) + return err +} + +func (rw *rwExporter) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} + +type otlpWriteHandler struct { + logger *slog.Logger + + cumul consumer.Metrics // only cumulative + delta 
consumer.Metrics // delta capable +} + +func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + req, err := DecodeOTLPWriteRequest(r) + if err != nil { + h.logger.Error("Error decoding OTLP write request", "err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + md := req.Metrics() + // if delta conversion enabled AND delta samples exist, use slower delta capable path + if h.delta != nil && hasDelta(md) { + err = h.delta.ConsumeMetrics(r.Context(), md) + } else { + // deltatocumulative currently holds a sync.Mutex when entering ConsumeMetrics. + // This is slow and not necessary when no delta samples exist anyways + err = h.cumul.ConsumeMetrics(r.Context(), md) + } switch { case err == nil: @@ -545,6 +641,31 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) } +func hasDelta(md pmetric.Metrics) bool { + for i := range md.ResourceMetrics().Len() { + sms := md.ResourceMetrics().At(i).ScopeMetrics() + for i := range sms.Len() { + ms := sms.At(i).Metrics() + for i := range ms.Len() { + temporality := pmetric.AggregationTemporalityUnspecified + m := ms.At(i) + switch ms.At(i).Type() { + case pmetric.MetricTypeSum: + temporality = m.Sum().AggregationTemporality() + case pmetric.MetricTypeExponentialHistogram: + temporality = m.ExponentialHistogram().AggregationTemporality() + case pmetric.MetricTypeHistogram: + temporality = m.Histogram().AggregationTemporality() + } + if temporality == pmetric.AggregationTemporalityDelta { + return true + } + } + } + } + return false +} + type timeLimitAppender struct { storage.Appender diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 0698c6c8ac7..25b65eb577f 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -30,6 +30,8 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "golang.org/x/text/cases" + "golang.org/x/text/language" common_templates "github.com/prometheus/common/helpers/templates" @@ -166,7 +168,7 @@ func NewTemplateExpander( return html_template.HTML(text) }, "match": regexp.MatchString, - "title": strings.Title, //nolint:staticcheck // TODO(beorn7): Need to come up with a replacement using the cases package. 
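// Editorial aside (not part of the patch): strings.Title has been deprecated
// since Go 1.18 because its word-boundary rule mishandles Unicode punctuation;
// the replacement on the next line builds an x/text caser once and reuses its
// String method. Standalone sketch:
//
//	titler := cases.Title(language.AmericanEnglish, cases.NoLower)
//	titler.String("alerting rule") // "Alerting Rule"
//	titler.String("fooBAR")        // "FooBAR" (cases.NoLower keeps existing capitals)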
+ "title": cases.Title(language.AmericanEnglish, cases.NoLower).String, "toUpper": strings.ToUpper, "toLower": strings.ToLower, "graphLink": strutil.GraphLinkForExpression, diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 6483f8d8bbe..4ffd2463c3c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -221,7 +221,7 @@ type BlockMetaCompaction struct { } func (bm *BlockMetaCompaction) SetOutOfOrder() { - if bm.containsHint(CompactionHintFromOutOfOrder) { + if bm.FromOutOfOrder() { return } bm.Hints = append(bm.Hints, CompactionHintFromOutOfOrder) @@ -229,16 +229,7 @@ func (bm *BlockMetaCompaction) SetOutOfOrder() { } func (bm *BlockMetaCompaction) FromOutOfOrder() bool { - return bm.containsHint(CompactionHintFromOutOfOrder) -} - -func (bm *BlockMetaCompaction) containsHint(hint string) bool { - for _, h := range bm.Hints { - if h == hint { - return true - } - } - return false + return slices.Contains(bm.Hints, CompactionHintFromOutOfOrder) } const ( diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 2cfa4f36382..2d35e3fb007 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -2320,7 +2320,7 @@ func isTmpDir(fi fs.DirEntry) bool { fn := fi.Name() ext := filepath.Ext(fn) if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy { - if strings.HasPrefix(fn, "checkpoint.") { + if strings.HasPrefix(fn, wlog.CheckpointPrefix) { return true } if strings.HasPrefix(fn, chunkSnapshotPrefix) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 47f85d77137..4fbb4b5710b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -1730,6 +1730,11 @@ func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labe h.numSeries.Inc() h.postings.Add(storage.SeriesRef(id), lset) + + // Adding the series in the postings marks the creation of series + // as any further calls to this and the read methods would return that series. + h.series.postCreation(lset) + return s, true, nil } @@ -2037,9 +2042,6 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu // The callback prevented creation of series. return nil, false, preCreationErr } - // Setting the series in the s.hashes marks the creation of series - // as any further calls to this methods would return that series. 
- s.seriesLifecycleCallback.PostCreation(series.labels()) i = uint64(series.ref) & uint64(s.size-1) @@ -2050,6 +2052,10 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu return series, true, nil } +func (s *stripeSeries) postCreation(lset labels.Labels) { + s.seriesLifecycleCallback.PostCreation(lset) +} + type sample struct { t int64 f float64 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index ea2a163f261..c94c42bc532 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -943,17 +943,37 @@ func (a *headAppender) log() error { } } if len(a.histograms) > 0 { - rec = enc.HistogramSamples(a.histograms, buf) + var customBucketsHistograms []record.RefHistogramSample + rec, customBucketsHistograms = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log histograms: %w", err) + if len(rec) > 0 { + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log histograms: %w", err) + } + } + + if len(customBucketsHistograms) > 0 { + rec = enc.CustomBucketsHistogramSamples(customBucketsHistograms, buf) + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log custom buckets histograms: %w", err) + } } } if len(a.floatHistograms) > 0 { - rec = enc.FloatHistogramSamples(a.floatHistograms, buf) + var customBucketsFloatHistograms []record.RefFloatHistogramSample + rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(a.floatHistograms, buf) buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log float histograms: %w", err) + if len(rec) > 0 { + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log float histograms: %w", err) + } + } + + if len(customBucketsFloatHistograms) > 0 { + rec = enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, buf) + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log custom buckets float histograms: %w", err) + } } } // Exemplars should be logged after samples (float/native histogram/etc), @@ -1070,12 +1090,24 @@ func (acc *appenderCommitContext) collectOOORecords(a *headAppender) { acc.oooRecords = append(acc.oooRecords, r) } if len(acc.wblHistograms) > 0 { - r := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) - acc.oooRecords = append(acc.oooRecords, r) + r, customBucketsHistograms := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) + if len(r) > 0 { + acc.oooRecords = append(acc.oooRecords, r) + } + if len(customBucketsHistograms) > 0 { + r := acc.enc.CustomBucketsHistogramSamples(customBucketsHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } } if len(acc.wblFloatHistograms) > 0 { - r := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) - acc.oooRecords = append(acc.oooRecords, r) + r, customBucketsFloatHistograms := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) + if len(r) > 0 { + acc.oooRecords = append(acc.oooRecords, r) + } + if len(customBucketsFloatHistograms) > 0 { + r := acc.enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } } acc.wblSamples = nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go 
b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index 79ed0f02406..b95257c28a0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -103,20 +103,7 @@ func (h *headIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Ma // Postings returns the postings list iterator for the label pairs. func (h *headIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { - switch len(values) { - case 0: - return index.EmptyPostings(), nil - case 1: - return h.head.postings.Get(name, values[0]), nil - default: - res := make([]index.Postings, 0, len(values)) - for _, value := range values { - if p := h.head.postings.Get(name, value); !index.IsEmptyPostingsType(p) { - res = append(res, p) - } - } - return index.Merge(ctx, res...), nil - } + return h.head.postings.Postings(ctx, name, values...), nil } func (h *headIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 6744d582ae1..0afe84a875b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -187,7 +187,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- exemplars - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: hists := histogramsPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -199,7 +199,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- hists - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: hists := floatHistogramsPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { @@ -694,9 +694,9 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch go func() { defer close(decodedCh) - var err error dec := record.NewDecoder(syms) for r.Next() { + var err error rec := r.Record() switch dec.Type(rec) { case record.Samples: @@ -723,7 +723,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- markers - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: hists := histogramSamplesPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -735,7 +735,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- hists - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: hists := floatHistogramSamplesPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index a2c5a822391..e3ba5d64b4a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -235,25 +235,9 @@ func (p *MemPostings) Stats(label string, limit int, labelSizeFunc func(string, } } -// Get returns a postings list for the given label pair. 
-func (p *MemPostings) Get(name, value string) Postings { - var lp []storage.SeriesRef - p.mtx.RLock() - l := p.m[name] - if l != nil { - lp = l[value] - } - p.mtx.RUnlock() - - if lp == nil { - return EmptyPostings() - } - return newListPostings(lp...) -} - // All returns a postings list over all documents ever added. func (p *MemPostings) All() Postings { - return p.Get(AllPostingsKey()) + return p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value) } // EnsureOrder ensures that all postings lists are sorted. After it returns all further @@ -490,7 +474,7 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, } // Now `vals` only contains the values that matched, get their postings. - its := make([]Postings, 0, len(vals)) + its := make([]*ListPostings, 0, len(vals)) lps := make([]ListPostings, len(vals)) p.mtx.RLock() e := p.m[name] @@ -510,11 +494,27 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, return Merge(ctx, its...) } +// Postings returns a postings iterator for the given label values. +func (p *MemPostings) Postings(ctx context.Context, name string, values ...string) Postings { + res := make([]*ListPostings, 0, len(values)) + lps := make([]ListPostings, len(values)) + p.mtx.RLock() + postingsMapForName := p.m[name] + for i, value := range values { + if lp := postingsMapForName[value]; lp != nil { + lps[i] = ListPostings{list: lp} + res = append(res, &lps[i]) + } + } + p.mtx.RUnlock() + return Merge(ctx, res...) +} + func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string) Postings { p.mtx.RLock() e := p.m[name] - its := make([]Postings, 0, len(e)) + its := make([]*ListPostings, 0, len(e)) lps := make([]ListPostings, len(e)) i := 0 for _, refs := range e { @@ -660,7 +660,7 @@ func (it *intersectPostings) Err() error { } // Merge returns a new iterator over the union of the input iterators. -func Merge(_ context.Context, its ...Postings) Postings { +func Merge[T Postings](_ context.Context, its ...T) Postings { if len(its) == 0 { return EmptyPostings() } @@ -675,19 +675,19 @@ func Merge(_ context.Context, its ...Postings) Postings { return p } -type mergedPostings struct { - p []Postings - h *loser.Tree[storage.SeriesRef, Postings] +type mergedPostings[T Postings] struct { + p []T + h *loser.Tree[storage.SeriesRef, T] cur storage.SeriesRef } -func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { +func newMergedPostings[T Postings](p []T) (m *mergedPostings[T], nonEmpty bool) { const maxVal = storage.SeriesRef(math.MaxUint64) // This value must be higher than all real values used in the tree. 
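// Illustrative sketch (not part of the upstream diff): making Merge generic
// over T Postings lets callers such as MemPostings.Postings above pass a
// concrete []*ListPostings, so the loser tree is instantiated for the
// concrete type rather than for boxed Postings interface values. A hedged
// usage example, assuming code inside the index package:
//
//	lps := []*ListPostings{
//		newListPostings(1, 3, 5),
//		newListPostings(2, 3, 6),
//	}
//	it := Merge(context.Background(), lps...) // instantiates Merge[*ListPostings]
//	for it.Next() {
//		fmt.Println(it.At()) // prints 1, 2, 3, 5, 6 (the deduplicated union)
//	}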
lt := loser.New(p, maxVal) - return &mergedPostings{p: p, h: lt}, true + return &mergedPostings[T]{p: p, h: lt}, true } -func (it *mergedPostings) Next() bool { +func (it *mergedPostings[T]) Next() bool { for { if !it.h.Next() { return false @@ -701,7 +701,7 @@ func (it *mergedPostings) Next() bool { } } -func (it *mergedPostings) Seek(id storage.SeriesRef) bool { +func (it *mergedPostings[T]) Seek(id storage.SeriesRef) bool { for !it.h.IsEmpty() && it.h.At() < id { finished := !it.h.Winner().Seek(id) it.h.Fix(finished) @@ -713,11 +713,11 @@ func (it *mergedPostings) Seek(id storage.SeriesRef) bool { return true } -func (it mergedPostings) At() storage.SeriesRef { +func (it mergedPostings[T]) At() storage.SeriesRef { return it.cur } -func (it mergedPostings) Err() error { +func (it mergedPostings[T]) Err() error { for _, p := range it.p { if err := p.Err(); err != nil { return err @@ -863,6 +863,11 @@ func (it *ListPostings) Err() error { return nil } +// Len returns the remaining number of postings in the list. +func (it *ListPostings) Len() int { + return len(it.list) +} + // bigEndianPostings implements the Postings interface over a byte stream of // big endian numbers. type bigEndianPostings struct { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 784d0b23d7c..4d2a52b9af1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -52,6 +52,10 @@ const ( HistogramSamples Type = 7 // FloatHistogramSamples is used to match WAL records of type Float Histograms. FloatHistogramSamples Type = 8 + // CustomBucketsHistogramSamples is used to match WAL records of type Histogram with custom buckets. + CustomBucketsHistogramSamples Type = 9 + // CustomBucketsFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets. 
+ CustomBucketsFloatHistogramSamples Type = 10 ) func (rt Type) String() string { @@ -68,6 +72,10 @@ func (rt Type) String() string { return "histogram_samples" case FloatHistogramSamples: return "float_histogram_samples" + case CustomBucketsHistogramSamples: + return "custom_buckets_histogram_samples" + case CustomBucketsFloatHistogramSamples: + return "custom_buckets_float_histogram_samples" case MmapMarkers: return "mmapmarkers" case Metadata: @@ -207,7 +215,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples: return t } return Unknown @@ -428,7 +436,7 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != HistogramSamples { + if t != HistogramSamples && t != CustomBucketsHistogramSamples { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -505,12 +513,22 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { for i := range h.NegativeBuckets { h.NegativeBuckets[i] = buf.Varint64() } + + if histogram.IsCustomBucketsSchema(h.Schema) { + l = buf.Uvarint() + if l > 0 { + h.CustomValues = make([]float64, l) + } + for i := range h.CustomValues { + h.CustomValues[i] = buf.Be64Float64() + } + } } func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != FloatHistogramSamples { + if t != FloatHistogramSamples && t != CustomBucketsFloatHistogramSamples { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -587,6 +605,16 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) { for i := range fh.NegativeBuckets { fh.NegativeBuckets[i] = buf.Be64Float64() } + + if histogram.IsCustomBucketsSchema(fh.Schema) { + l = buf.Uvarint() + if l > 0 { + fh.CustomValues = make([]float64, l) + } + for i := range fh.CustomValues { + fh.CustomValues[i] = buf.Be64Float64() + } + } } // Encoder encodes series, sample, and tombstones records. @@ -716,10 +744,44 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { return buf.Get() } -func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []byte { +func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(HistogramSamples)) + if len(histograms) == 0 { + return buf.Get(), nil + } + var customBucketHistograms []RefHistogramSample + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. 
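// Hedged sketch of the new caller contract (it mirrors the headAppender.log
// change above; buffer names are illustrative): HistogramSamples now returns
// the samples it skipped, namely those with custom bucket schemas, and the
// caller encodes them as a separate CustomBucketsHistogramSamples record:
//
//	rec, custom := enc.HistogramSamples(histograms, buf)
//	if len(rec) > 0 {
//		_ = wal.Log(rec) // exponential-schema histograms, record type 7
//	}
//	if len(custom) > 0 {
//		rec = enc.CustomBucketsHistogramSamples(custom, buf2)
//		_ = wal.Log(rec) // custom-bucket histograms, record type 9
//	}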
+ first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + if h.H.UsesCustomBuckets() { + customBucketHistograms = append(customBucketHistograms, h) + continue + } + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeHistogram(&buf, h.H) + } + + // Reset buffer if only custom bucket histograms existed in list of histogram samples. + if len(histograms) == len(customBucketHistograms) { + buf.Reset() + } + + return buf.Get(), customBucketHistograms +} + +func (e *Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomBucketsHistogramSamples)) + if len(histograms) == 0 { return buf.Get() } @@ -772,12 +834,54 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { for _, b := range h.NegativeBuckets { buf.PutVarint64(b) } + + if histogram.IsCustomBucketsSchema(h.Schema) { + buf.PutUvarint(len(h.CustomValues)) + for _, v := range h.CustomValues { + buf.PutBEFloat64(v) + } + } } -func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { +func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(FloatHistogramSamples)) + if len(histograms) == 0 { + return buf.Get(), nil + } + + var customBucketsFloatHistograms []RefFloatHistogramSample + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. + first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + if h.FH.UsesCustomBuckets() { + customBucketsFloatHistograms = append(customBucketsFloatHistograms, h) + continue + } + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeFloatHistogram(&buf, h.FH) + } + + // Reset buffer if only custom bucket histograms existed in list of histogram samples + if len(histograms) == len(customBucketsFloatHistograms) { + buf.Reset() + } + + return buf.Get(), customBucketsFloatHistograms +} + +func (e *Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomBucketsFloatHistogramSamples)) + if len(histograms) == 0 { return buf.Get() } @@ -830,4 +934,11 @@ func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) { for _, b := range h.NegativeBuckets { buf.PutBEFloat64(b) } + + if histogram.IsCustomBucketsSchema(h.Schema) { + buf.PutUvarint(len(h.CustomValues)) + for _, v := range h.CustomValues { + buf.PutBEFloat64(v) + } + } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/testutil.go b/vendor/github.com/prometheus/prometheus/tsdb/testutil.go index 57516c62710..e957b0307b6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/testutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/testutil.go @@ -29,11 +29,13 @@ import ( ) const ( - float = "float" - intHistogram = "integer histogram" - floatHistogram = "float histogram" - gaugeIntHistogram = "gauge int histogram" - gaugeFloatHistogram = "gauge float histogram" + float = "float" + intHistogram = "integer histogram" + floatHistogram = "float histogram" + customBucketsIntHistogram = "custom buckets int histogram" + customBucketsFloatHistogram = "custom buckets float histogram" + 
gaugeIntHistogram = "gauge int histogram" + gaugeFloatHistogram = "gauge float histogram" ) type testValue struct { @@ -82,6 +84,28 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{ return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)} }, }, + customBucketsIntHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)} + ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)} + }, + }, + customBucketsFloatHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)} + ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)} + }, + }, gaugeIntHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go index 60c3e5f7261..a923519ef77 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go @@ -57,6 +57,17 @@ func GenerateTestHistogram(i int64) *histogram.Histogram { } } +func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) { + for i := 0; i < n; i++ { + h := GenerateTestCustomBucketsHistogram(int64(i)) + if i > 0 { + h.CounterResetHint = histogram.NotCounterReset + } + r = append(r, h) + } + return r +} + func GenerateTestCustomBucketsHistogram(i int64) *histogram.Histogram { return &histogram.Histogram{ Count: 5 + uint64(i*4), @@ -117,6 +128,17 @@ func GenerateTestFloatHistogram(i int64) *histogram.FloatHistogram { } } +func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistogram) { + for i := 0; i < n; i++ { + h := GenerateTestCustomBucketsFloatHistogram(int64(i)) + if i > 0 { + h.CounterResetHint = histogram.NotCounterReset + } + r = append(r, h) + } + return r +} + func GenerateTestCustomBucketsFloatHistogram(i int64) *histogram.FloatHistogram { return &histogram.FloatHistogram{ Count: 5 + float64(i*4), diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go index 58e11c770e0..5c607d70302 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go @@ -81,7 +81,8 @@ func DeleteCheckpoints(dir string, maxIndex int) error { return errs.Err() } -const checkpointPrefix = "checkpoint." +// CheckpointPrefix is the prefix used for checkpoint files. +const CheckpointPrefix = "checkpoint." // Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL. // It includes the most recent checkpoint if it exists. 
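// Minimal sketch: exporting CheckpointPrefix lets callers outside wlog detect
// checkpoint directories without duplicating the "checkpoint." literal, as
// tsdb.isTmpDir now does in the db.go hunk above. The helper below is
// hypothetical:
//
//	import "github.com/prometheus/prometheus/tsdb/wlog"
//
//	func looksLikeCheckpoint(name string) bool {
//		return strings.HasPrefix(name, wlog.CheckpointPrefix)
//	}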
@@ -221,11 +222,27 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf = enc.HistogramSamples(repl, buf) + buf, _ = enc.HistogramSamples(repl, buf) + } + stats.TotalSamples += len(histogramSamples) + stats.DroppedSamples += len(histogramSamples) - len(repl) + case record.CustomBucketsHistogramSamples: + histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) + if err != nil { + return nil, fmt.Errorf("decode histogram samples: %w", err) + } + // Drop irrelevant histogramSamples in place. + repl := histogramSamples[:0] + for _, h := range histogramSamples { + if h.T >= mint { + repl = append(repl, h) + } + } + if len(repl) > 0 { + buf = enc.CustomBucketsHistogramSamples(repl, buf) } stats.TotalSamples += len(histogramSamples) stats.DroppedSamples += len(histogramSamples) - len(repl) - case record.FloatHistogramSamples: floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) if err != nil { @@ -239,11 +256,27 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf = enc.FloatHistogramSamples(repl, buf) + buf, _ = enc.FloatHistogramSamples(repl, buf) + } + stats.TotalSamples += len(floatHistogramSamples) + stats.DroppedSamples += len(floatHistogramSamples) - len(repl) + case record.CustomBucketsFloatHistogramSamples: + floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) + if err != nil { + return nil, fmt.Errorf("decode float histogram samples: %w", err) + } + // Drop irrelevant floatHistogramSamples in place. + repl := floatHistogramSamples[:0] + for _, fh := range floatHistogramSamples { + if fh.T >= mint { + repl = append(repl, fh) + } + } + if len(repl) > 0 { + buf = enc.CustomBucketsFloatHistogramSamples(repl, buf) } stats.TotalSamples += len(floatHistogramSamples) stats.DroppedSamples += len(floatHistogramSamples) - len(repl) - case record.Tombstones: tstones, err = dec.Tombstones(rec, tstones) if err != nil { @@ -363,7 +396,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } func checkpointDir(dir string, i int) string { - return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i)) + return filepath.Join(dir, fmt.Sprintf(CheckpointPrefix+"%08d", i)) } type checkpointRef struct { @@ -379,13 +412,13 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) { for i := 0; i < len(files); i++ { fi := files[i] - if !strings.HasPrefix(fi.Name(), checkpointPrefix) { + if !strings.HasPrefix(fi.Name(), CheckpointPrefix) { continue } if !fi.IsDir() { return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name()) } - idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) + idx, err := strconv.Atoi(fi.Name()[len(CheckpointPrefix):]) if err != nil { continue } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index 89db5d2dd72..ca74a9ceafc 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -546,7 +546,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { } w.writer.AppendExemplars(exemplars) - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. 
if !w.sendHistograms { break @@ -574,7 +574,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { histogramsToSend = histogramsToSend[:0] } - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. if !w.sendHistograms { break @@ -679,7 +679,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err // Ensure we read the whole contents of every segment in the checkpoint dir. segs, err := listSegments(checkpointDir) if err != nil { - return fmt.Errorf("Unable to get segments checkpoint dir: %w", err) + return fmt.Errorf("unable to get segments checkpoint dir: %w", err) } for _, segRef := range segs { size, err := getSegmentSize(checkpointDir, segRef.index) diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index 1b743f70576..5b2fde152bd 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -148,6 +148,7 @@ var ( HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) + HistogramIgnoredInMixedRangeInfo = fmt.Errorf("%w: ignored histograms in a range containing both floats and histograms for metric name", PromQLInfo) ) type annoErr struct { @@ -293,3 +294,10 @@ func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.Posit Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation), } } + +func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName), + } +} diff --git a/vendor/github.com/prometheus/prometheus/util/logging/file.go b/vendor/github.com/prometheus/prometheus/util/logging/file.go index 9db7fb722be..27fdec27589 100644 --- a/vendor/github.com/prometheus/prometheus/util/logging/file.go +++ b/vendor/github.com/prometheus/prometheus/util/logging/file.go @@ -16,16 +16,22 @@ package logging import ( "context" "fmt" + "io" "log/slog" "os" "github.com/prometheus/common/promslog" ) -// JSONFileLogger represents a logger that writes JSON to a file. It implements the promql.QueryLogger interface. +var _ slog.Handler = (*JSONFileLogger)(nil) + +var _ io.Closer = (*JSONFileLogger)(nil) + +// JSONFileLogger represents a logger that writes JSON to a file. It implements +// the slog.Handler interface, as well as the io.Closer interface. type JSONFileLogger struct { - logger *slog.Logger - file *os.File + handler slog.Handler + file *os.File } // NewJSONFileLogger returns a new JSONFileLogger. 
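// Hedged sketch: since JSONFileLogger now implements slog.Handler instead of
// wrapping a *slog.Logger, callers are expected to build a standard logger on
// top of it. The file path and attribute names below are illustrative:
//
//	l, err := logging.NewJSONFileLogger("/var/log/prometheus/query.log")
//	if err != nil {
//		return err
//	}
//	defer l.Close()
//	logger := slog.New(l).With("component", "query-logger") // goes through WithAttrs
//	logger.Info("query executed", "duration_ms", 42)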
@@ -42,24 +48,47 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { jsonFmt := &promslog.AllowedFormat{} _ = jsonFmt.Set("json") return &JSONFileLogger{ - logger: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}), - file: f, + handler: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}).Handler(), + file: f, }, nil } -// Close closes the underlying file. It implements the promql.QueryLogger interface. +// Close closes the underlying file. It implements the io.Closer interface. func (l *JSONFileLogger) Close() error { return l.file.Close() } -// With calls the `With()` method on the underlying `log/slog.Logger` with the -// provided msg and args. It implements the promql.QueryLogger interface. -func (l *JSONFileLogger) With(args ...any) { - l.logger = l.logger.With(args...) +// Enabled returns true if and only if the internal slog.Handler is enabled. It +// implements the slog.Handler interface. +func (l *JSONFileLogger) Enabled(ctx context.Context, level slog.Level) bool { + return l.handler.Enabled(ctx, level) +} + +// Handle takes record created by an slog.Logger and forwards it to the +// internal slog.Handler for dispatching the log call to the backing file. It +// implements the slog.Handler interface. +func (l *JSONFileLogger) Handle(ctx context.Context, r slog.Record) error { + return l.handler.Handle(ctx, r.Clone()) +} + +// WithAttrs returns a new *JSONFileLogger with a new internal handler that has +// the provided attrs attached as attributes on all further log calls. It +// implements the slog.Handler interface. +func (l *JSONFileLogger) WithAttrs(attrs []slog.Attr) slog.Handler { + if len(attrs) == 0 { + return l + } + + return &JSONFileLogger{file: l.file, handler: l.handler.WithAttrs(attrs)} } -// Log calls the `Log()` method on the underlying `log/slog.Logger` with the -// provided msg and args. It implements the promql.QueryLogger interface. -func (l *JSONFileLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) { - l.logger.Log(ctx, level, msg, args...) +// WithGroup returns a new *JSONFileLogger with a new internal handler that has +// the provided group name attached, to group all other attributes added to the +// logger. It implements the slog.Handler interface. 
+func (l *JSONFileLogger) WithGroup(name string) slog.Handler { + if name == "" { + return l + } + + return &JSONFileLogger{file: l.file, handler: l.handler.WithGroup(name)} } diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/port.go b/vendor/github.com/prometheus/prometheus/util/testutil/port.go index 7cf4cf1ccc9..91c1291749f 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/port.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/port.go @@ -15,6 +15,7 @@ package testutil import ( "net" + "slices" "sync" "testing" ) @@ -48,12 +49,7 @@ func RandomUnprivilegedPort(t *testing.T) int { } func portWasUsed(port int) bool { - for _, usedPort := range usedPorts { - if port == usedPort { - return true - } - } - return false + return slices.Contains(usedPorts, port) } func getPort() (int, error) { diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 392dfc6aabf..1b8798feb85 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -68,6 +68,9 @@ const ( // Non-standard status code (originally introduced by nginx) for the case when a client closes // the connection while the server is still processing the request. statusClientClosedConnection = 499 + + // checkContextEveryNIterations is used in some tight loops to check if the context is done. + checkContextEveryNIterations = 128 ) type errorType string @@ -144,6 +147,8 @@ type PrometheusVersion struct { type RuntimeInfo struct { StartTime time.Time `json:"startTime"` CWD string `json:"CWD"` + Hostname string `json:"hostname"` + ServerTime time.Time `json:"serverTime"` ReloadConfigSuccess bool `json:"reloadConfigSuccess"` LastConfigTime time.Time `json:"lastConfigTime"` CorruptionCount int64 `json:"corruptionCount"` @@ -257,7 +262,8 @@ func NewAPI( statsRenderer StatsRenderer, rwEnabled bool, acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg, - otlpEnabled bool, + otlpEnabled, otlpDeltaToCumulative bool, + ctZeroIngestionEnabled bool, ) *API { a := &API{ QueryEngine: qe, @@ -301,10 +307,10 @@ func NewAPI( } if rwEnabled { - a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs) + a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled) } if otlpEnabled { - a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc) + a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ConvertDelta: otlpDeltaToCumulative}) } return a @@ -435,6 +441,10 @@ func (api *API) options(*http.Request) apiFuncResult { } func (api *API) query(r *http.Request) (result apiFuncResult) { + limit, err := parseLimitParam(r.FormValue("limit")) + if err != nil { + return invalidParamError(err, "limit") + } ts, err := parseTimeParam(r, "time", api.now()) if err != nil { return invalidParamError(err, "time") @@ -476,6 +486,15 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close} } + warnings := res.Warnings + if limit > 0 { + var isTruncated bool + + res, isTruncated = truncateResults(res, limit) + if isTruncated { + warnings = warnings.Add(errors.New("results truncated due to limit")) + } + } // Optional stats field in response if parameter "stats" is not empty. 
sr := api.statsRenderer if sr == nil { @@ -487,7 +506,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { ResultType: res.Value.Type(), Result: res.Value, Stats: qs, - }, nil, res.Warnings, qry.Close} + }, nil, warnings, qry.Close} } func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { @@ -523,6 +542,10 @@ func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { } func (api *API) queryRange(r *http.Request) (result apiFuncResult) { + limit, err := parseLimitParam(r.FormValue("limit")) + if err != nil { + return invalidParamError(err, "limit") + } start, err := parseTime(r.FormValue("start")) if err != nil { return invalidParamError(err, "start") @@ -587,6 +610,16 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close} } + warnings := res.Warnings + if limit > 0 { + var isTruncated bool + + res, isTruncated = truncateResults(res, limit) + if isTruncated { + warnings = warnings.Add(errors.New("results truncated due to limit")) + } + } + // Optional stats field in response if parameter "stats" is not empty. sr := api.statsRenderer if sr == nil { @@ -598,7 +631,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { ResultType: res.Value.Type(), Result: res.Value, Stats: qs, - }, nil, res.Warnings, qry.Close} + }, nil, warnings, qry.Close} } func (api *API) queryExemplars(r *http.Request) apiFuncResult { @@ -932,10 +965,15 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { warnings := set.Warnings() + i := 1 for set.Next() { - if err := ctx.Err(); err != nil { - return apiFuncResult{nil, returnAPIError(err), warnings, closer} + if i%checkContextEveryNIterations == 0 { + if err := ctx.Err(); err != nil { + return apiFuncResult{nil, returnAPIError(err), warnings, closer} + } } + i++ + metrics = append(metrics, set.At().Labels()) if limit > 0 && len(metrics) > limit { @@ -1083,12 +1121,12 @@ func (api *API) targets(r *http.Request) apiFuncResult { showActive := state == "" || state == "any" || state == "active" showDropped := state == "" || state == "any" || state == "dropped" res := &TargetDiscovery{} + builder := labels.NewBuilder(labels.EmptyLabels()) if showActive { targetsActive := api.targetRetriever(r.Context()).TargetsActive() activeKeys, numTargets := sortKeys(targetsActive) res.ActiveTargets = make([]*Target, 0, numTargets) - builder := labels.NewScratchBuilder(0) for _, key := range activeKeys { if scrapePool != "" && key != scrapePool { @@ -1104,8 +1142,8 @@ func (api *API) targets(r *http.Request) apiFuncResult { globalURL, err := getGlobalURL(target.URL(), api.globalURLOptions) res.ActiveTargets = append(res.ActiveTargets, &Target{ - DiscoveredLabels: target.DiscoveredLabels(), - Labels: target.Labels(&builder), + DiscoveredLabels: target.DiscoveredLabels(builder), + Labels: target.Labels(builder), ScrapePool: key, ScrapeURL: target.URL().String(), GlobalURL: globalURL.String(), @@ -1143,7 +1181,7 @@ func (api *API) targets(r *http.Request) apiFuncResult { } for _, target := range targetsDropped[key] { res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{ - DiscoveredLabels: target.DiscoveredLabels(), + DiscoveredLabels: target.DiscoveredLabels(builder), }) } } @@ -1181,7 +1219,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } } - builder := labels.NewScratchBuilder(0) + builder := labels.NewBuilder(labels.EmptyLabels()) metric := r.FormValue("metric") res := []metricMetadata{} for _, 
tt := range api.targetRetriever(r.Context()).TargetsActive() { @@ -1189,7 +1227,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { if limit >= 0 && len(res) >= limit { break } - targetLabels := t.Labels(&builder) + targetLabels := t.Labels(builder) // Filter targets that don't satisfy the label matchers. if matchTarget != "" && !matchLabels(targetLabels, matchers) { continue @@ -1198,11 +1236,11 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { if metric == "" { for _, md := range t.ListMetadata() { res = append(res, metricMetadata{ - Target: targetLabels, - Metric: md.Metric, - Type: md.Type, - Help: md.Help, - Unit: md.Unit, + Target: targetLabels, + MetricFamily: md.MetricFamily, + Type: md.Type, + Help: md.Help, + Unit: md.Unit, }) } continue @@ -1223,11 +1261,11 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } type metricMetadata struct { - Target labels.Labels `json:"target"` - Metric string `json:"metric,omitempty"` - Type model.MetricType `json:"type"` - Help string `json:"help"` - Unit string `json:"unit"` + Target labels.Labels `json:"target"` + MetricFamily string `json:"metric,omitempty"` + Type model.MetricType `json:"type"` + Help string `json:"help"` + Unit string `json:"unit"` } // AlertmanagerDiscovery has all the active Alertmanagers. @@ -1327,7 +1365,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { if metric == "" { for _, mm := range t.ListMetadata() { m := metadata.Metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit} - ms, ok := metrics[mm.Metric] + ms, ok := metrics[mm.MetricFamily] if limitPerMetric > 0 && len(ms) >= limitPerMetric { continue @@ -1335,7 +1373,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { if !ok { ms = map[metadata.Metadata]struct{}{} - metrics[mm.Metric] = ms + metrics[mm.MetricFamily] = ms } ms[m] = struct{}{} } @@ -1344,7 +1382,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { if md, ok := t.GetMetadata(metric); ok { m := metadata.Metadata{Type: md.Type, Help: md.Help, Unit: md.Unit} - ms, ok := metrics[md.Metric] + ms, ok := metrics[md.MetricFamily] if limitPerMetric > 0 && len(ms) >= limitPerMetric { continue @@ -1352,7 +1390,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { if !ok { ms = map[metadata.Metadata]struct{}{} - metrics[md.Metric] = ms + metrics[md.MetricFamily] = ms } ms[m] = struct{}{} } @@ -2013,7 +2051,7 @@ func parseTimeParam(r *http.Request, paramName string, defaultValue time.Time) ( } result, err := parseTime(val) if err != nil { - return time.Time{}, fmt.Errorf("Invalid time value for '%s': %w", paramName, err) + return time.Time{}, fmt.Errorf("invalid time value for '%s': %w", paramName, err) } return result, nil } @@ -2099,3 +2137,25 @@ func toHintLimit(limit int) int { } return limit } + +// truncateResults truncates result for queryRange() and query(). +// No truncation for other types(Scalars or Strings). +func truncateResults(result *promql.Result, limit int) (*promql.Result, bool) { + isTruncated := false + + switch v := result.Value.(type) { + case promql.Matrix: + if len(v) > limit { + result.Value = v[:limit] + isTruncated = true + } + case promql.Vector: + if len(v) > limit { + result.Value = v[:limit] + isTruncated = true + } + } + + // Return the modified result. Unchanged for other types. 
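// Illustrative request (not from the patch): with the new "limit" form
// parameter parsed by query() and queryRange() above, an instant query such as
//
//	GET /api/v1/query?query=up&limit=10
//
// returns at most 10 vector elements and adds a "results truncated due to
// limit" warning whenever truncation occurred; scalar and string results are
// returned unchanged.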
+ return result, isTruncated +} diff --git a/vendor/github.com/prometheus/sigv4/.yamllint b/vendor/github.com/prometheus/sigv4/.yamllint index 281c9464634..8d09c375fd8 100644 --- a/vendor/github.com/prometheus/sigv4/.yamllint +++ b/vendor/github.com/prometheus/sigv4/.yamllint @@ -1,5 +1,7 @@ --- extends: default +ignore: | + **/node_modules rules: braces: @@ -14,14 +16,10 @@ rules: document-start: disable indentation: spaces: consistent + indent-sequences: consistent key-duplicates: ignore: | config/testdata/section_key_dup.bad.yml line-length: disable truthy: - ignore: | - .github/workflows/codeql-analysis.yml - .github/workflows/funcbench.yml - .github/workflows/fuzzing.yml - .github/workflows/prombench.yml - .github/workflows/golangci-lint.yml + check-keys: false diff --git a/vendor/github.com/prometheus/sigv4/Makefile.common b/vendor/github.com/prometheus/sigv4/Makefile.common index cbb5d86382a..fc47bdbb215 100644 --- a/vendor/github.com/prometheus/sigv4/Makefile.common +++ b/vendor/github.com/prometheus/sigv4/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.60.2 +GOLANGCI_LINT_VERSION ?= v1.62.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/go.opentelemetry.io/collector/component/LICENSE b/vendor/go.opentelemetry.io/collector/component/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/component/Makefile b/vendor/go.opentelemetry.io/collector/component/Makefile new file mode 100644 index 00000000000..39734bfaebb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/component/build_info.go b/vendor/go.opentelemetry.io/collector/component/build_info.go new file mode 100644 index 00000000000..bcfa7430c13 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/build_info.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package component // import "go.opentelemetry.io/collector/component" + +// BuildInfo is the information that is logged at the application start and +// passed into each component. This information can be overridden in custom build. +type BuildInfo struct { + // Command is the executable file name, e.g. "otelcol". 
+ Command string + + // Description is the full name of the collector, e.g. "OpenTelemetry Collector". + Description string + + // Version string. + Version string +} + +// NewDefaultBuildInfo returns a default BuildInfo. +func NewDefaultBuildInfo() BuildInfo { + return BuildInfo{ + Command: "otelcol", + Description: "OpenTelemetry Collector", + Version: "latest", + } +} diff --git a/vendor/go.opentelemetry.io/collector/component/component.go b/vendor/go.opentelemetry.io/collector/component/component.go new file mode 100644 index 00000000000..0a0c160fc59 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/component.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package component outlines the abstraction of components within the OpenTelemetry Collector. It provides details on the component +// lifecycle as well as defining the interface that components must fulfill. +package component // import "go.opentelemetry.io/collector/component" + +import ( + "context" + "fmt" + "strings" +) + +// Component is either a receiver, exporter, processor, connector, or an extension. +// +// A component's lifecycle has the following phases: +// +// 1. Creation: The component is created using its respective factory, via a Create* call. +// 2. Start: The component's Start method is called. +// 3. Running: The component is up and running. +// 4. Shutdown: The component's Shutdown method is called and the lifecycle is complete. +// +// Once the lifecycle is complete it may be repeated, in which case a new component +// is created, starts, runs and is shutdown again. +type Component interface { + // Start tells the component to start. Host parameter can be used for communicating + // with the host after Start() has already returned. If an error is returned by + // Start() then the collector startup will be aborted. + // If this is an exporter component it may prepare for exporting + // by connecting to the endpoint. + // + // If the component needs to perform a long-running starting operation then it is recommended + // that Start() returns quickly and the long-running operation is performed in background. + // In that case make sure that the long-running operation does not use the context passed + // to Start() function since that context will be cancelled soon and can abort the long-running + // operation. Create a new context from the context.Background() for long-running operations. + Start(ctx context.Context, host Host) error + + // Shutdown is invoked during service shutdown. After Shutdown() is called, if the component + // accepted data in any way, it should not accept it anymore. + // + // This method must be safe to call: + // - without Start() having been called + // - if the component is in a shutdown state already + // + // If there are any background operations running by the component they must be aborted before + // this function returns. Remember that if you started any long-running background operations from + // the Start() method, those operations must be also cancelled. If there are any buffers in the + // component, they should be cleared and the data sent immediately to the next component. + // + // The component's lifecycle is completed once the Shutdown() method returns. No other + // methods of the component are called after that. If necessary a new component with + // the same or different configuration may be created and started (this may happen + // for example if we want to restart the component). 
+ Shutdown(ctx context.Context) error +} + +// StartFunc specifies the function invoked when the component.Component is being started. +type StartFunc func(context.Context, Host) error + +// Start starts the component. +func (f StartFunc) Start(ctx context.Context, host Host) error { + if f == nil { + return nil + } + return f(ctx, host) +} + +// ShutdownFunc specifies the function invoked when the component.Component is being shutdown. +type ShutdownFunc func(context.Context) error + +// Shutdown shuts down the component. +func (f ShutdownFunc) Shutdown(ctx context.Context) error { + if f == nil { + return nil + } + return f(ctx) +} + +// Kind represents component kinds. +type Kind int + +const ( + _ Kind = iota // skip 0, start types from 1. + KindReceiver + KindProcessor + KindExporter + KindExtension + KindConnector +) + +func (k Kind) String() string { + switch k { + case KindReceiver: + return "Receiver" + case KindProcessor: + return "Processor" + case KindExporter: + return "Exporter" + case KindExtension: + return "Extension" + case KindConnector: + return "Connector" + } + return "" +} + +// StabilityLevel represents the stability level of the component created by the factory. +// The stability level is used to determine if the component should be used in production +// or not. For more details see: +// https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stability-levels +type StabilityLevel int + +const ( + StabilityLevelUndefined StabilityLevel = iota // skip 0, start types from 1. + StabilityLevelUnmaintained + StabilityLevelDeprecated + StabilityLevelDevelopment + StabilityLevelAlpha + StabilityLevelBeta + StabilityLevelStable +) + +func (sl *StabilityLevel) UnmarshalText(in []byte) error { + str := strings.ToLower(string(in)) + switch str { + case "undefined": + *sl = StabilityLevelUndefined + case "unmaintained": + *sl = StabilityLevelUnmaintained + case "deprecated": + *sl = StabilityLevelDeprecated + case "development": + *sl = StabilityLevelDevelopment + case "alpha": + *sl = StabilityLevelAlpha + case "beta": + *sl = StabilityLevelBeta + case "stable": + *sl = StabilityLevelStable + default: + return fmt.Errorf("unsupported stability level: %q", string(in)) + } + return nil +} + +func (sl StabilityLevel) String() string { + switch sl { + case StabilityLevelUndefined: + return "Undefined" + case StabilityLevelUnmaintained: + return "Unmaintained" + case StabilityLevelDeprecated: + return "Deprecated" + case StabilityLevelDevelopment: + return "Development" + case StabilityLevelAlpha: + return "Alpha" + case StabilityLevelBeta: + return "Beta" + case StabilityLevelStable: + return "Stable" + } + return "" +} + +func (sl StabilityLevel) LogMessage() string { + switch sl { + case StabilityLevelUnmaintained: + return "Unmaintained component. Actively looking for contributors. Component will become deprecated after 3 months of remaining unmaintained." + case StabilityLevelDeprecated: + return "Deprecated component. Will be removed in future releases." + case StabilityLevelDevelopment: + return "Development component. May change in the future." + case StabilityLevelAlpha: + return "Alpha component. May change in the future." + case StabilityLevelBeta: + return "Beta component. May change in the future." + case StabilityLevelStable: + return "Stable component." + default: + return "Stability level of component is undefined" + } +} + +// Factory is implemented by all Component factories. 
+type Factory interface {
+ // Type gets the type of the component created by this factory.
+ Type() Type
+
+ // CreateDefaultConfig creates the default configuration for the Component.
+ // This method can be called multiple times depending on the pipeline
+ // configuration and should not cause side effects that prevent the creation
+ // of multiple instances of the Component.
+ // The object returned by this method needs to pass the checks implemented by
+ // 'componenttest.CheckConfigStruct'. It is recommended to have these checks in the
+ // tests of any implementation of the Factory interface.
+ CreateDefaultConfig() Config
+}
+
+// CreateDefaultConfigFunc is the equivalent of Factory.CreateDefaultConfig().
+type CreateDefaultConfigFunc func() Config
+
+// CreateDefaultConfig implements Factory.CreateDefaultConfig().
+func (f CreateDefaultConfigFunc) CreateDefaultConfig() Config {
+ return f()
+}
diff --git a/vendor/go.opentelemetry.io/collector/component/config.go b/vendor/go.opentelemetry.io/collector/component/config.go
new file mode 100644
index 00000000000..599b9be3236
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/component/config.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package component // import "go.opentelemetry.io/collector/component"
+
+import (
+ "reflect"
+
+ "go.uber.org/multierr"
+)
+
+// Config defines the configuration for a component.Component.
+//
+// Implementations and/or any sub-configs (other types embedded or included in the Config implementation)
+// MUST implement the ConfigValidator if any validation is required for that part of the configuration
+// (e.g. check if a required field is present).
+//
+// A valid implementation MUST pass the check componenttest.CheckConfigStruct (return nil error).
+type Config any
+
+// As interface types are only used for static typing, a common idiom to find the reflection Type
+// for an interface type Foo is to use a *Foo value.
+var configValidatorType = reflect.TypeOf((*ConfigValidator)(nil)).Elem()
+
+// ConfigValidator defines an optional interface for configurations to implement to do validation.
+type ConfigValidator interface {
+ // Validate validates the configuration and returns an error if invalid.
+ Validate() error
+}
+
+// ValidateConfig validates a config by calling Validate on the config itself, if it
+// implements ConfigValidator, and then recursively on its exported struct fields,
+// slice and array elements, and map keys and values.
+func ValidateConfig(cfg Config) error {
+ return validate(reflect.ValueOf(cfg))
+}
+
+func validate(v reflect.Value) error {
+ // Validate the value itself.
+ switch v.Kind() {
+ case reflect.Invalid:
+ return nil
+ case reflect.Ptr:
+ return validate(v.Elem())
+ case reflect.Struct:
+ var errs error
+ errs = multierr.Append(errs, callValidateIfPossible(v))
+ // Check each of the struct's exported fields.
+ for i := 0; i < v.NumField(); i++ {
+ if !v.Type().Field(i).IsExported() {
+ continue
+ }
+ errs = multierr.Append(errs, validate(v.Field(i)))
+ }
+ return errs
+ case reflect.Slice, reflect.Array:
+ var errs error
+ errs = multierr.Append(errs, callValidateIfPossible(v))
+ // Check each element of the slice or array.
+ for i := 0; i < v.Len(); i++ { + errs = multierr.Append(errs, validate(v.Index(i))) + } + return errs + case reflect.Map: + var errs error + errs = multierr.Append(errs, callValidateIfPossible(v)) + iter := v.MapRange() + for iter.Next() { + errs = multierr.Append(errs, validate(iter.Key())) + errs = multierr.Append(errs, validate(iter.Value())) + } + return errs + default: + return callValidateIfPossible(v) + } +} + +func callValidateIfPossible(v reflect.Value) error { + // If the value type implements ConfigValidator just call Validate + if v.Type().Implements(configValidatorType) { + return v.Interface().(ConfigValidator).Validate() + } + + // If the pointer type implements ConfigValidator call Validate on the pointer to the current value. + if reflect.PointerTo(v.Type()).Implements(configValidatorType) { + // If not addressable, then create a new *V pointer and set the value to current v. + if !v.CanAddr() { + pv := reflect.New(reflect.PointerTo(v.Type()).Elem()) + pv.Elem().Set(v) + v = pv.Elem() + } + return v.Addr().Interface().(ConfigValidator).Validate() + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/component/doc.go b/vendor/go.opentelemetry.io/collector/component/doc.go new file mode 100644 index 00000000000..c7ac848a1ef --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package component outlines the components used in the collector +// and provides a foundation for the component’s creation and +// termination process. A component can be either a receiver, exporter, +// processor, an extension, or a connector. +package component // import "go.opentelemetry.io/collector/component" diff --git a/vendor/go.opentelemetry.io/collector/component/host.go b/vendor/go.opentelemetry.io/collector/component/host.go new file mode 100644 index 00000000000..dc8210ffbe3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/host.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package component // import "go.opentelemetry.io/collector/component" + +// Host represents the entity that is hosting a Component. It is used to allow communication +// between the Component and its host (normally the service.Collector is the host). +// +// Components may require `component.Host` to implement additional interfaces to properly function. +// The component is expected to cast the `component.Host` to the interface it needs and return +// an error if the type assertion fails. +type Host interface { + // GetExtensions returns the map of extensions. Only enabled and created extensions will be returned. + // Typically, it is used to find an extension by type or by full config name. Both cases + // can be done by iterating the returned map. There are typically very few extensions, + // so there are no performance implications due to iteration. + // + // GetExtensions can be called by the component anytime after Component.Start() begins and + // until Component.Shutdown() ends. 
+ GetExtensions() map[ID]Component
+}
diff --git a/vendor/go.opentelemetry.io/collector/component/identifiable.go b/vendor/go.opentelemetry.io/collector/component/identifiable.go
new file mode 100644
index 00000000000..63b890b47ac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/component/identifiable.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package component // import "go.opentelemetry.io/collector/component"
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// typeAndNameSeparator is the separator that is used between type and name in type/name composite keys.
+const typeAndNameSeparator = "/"
+
+var (
+ // typeRegexp is used to validate the type of a component.
+ // A type must start with an ASCII alphabetic character and
+ // can only contain ASCII alphanumeric characters and '_'.
+ // This must be kept in sync with the regex in cmd/mdatagen/validate.go.
+ typeRegexp = regexp.MustCompile(`^[a-zA-Z][0-9a-zA-Z_]{0,62}$`)
+
+ // nameRegexp is used to validate the name of a component. A name can consist of
+ // 1 to 1024 Unicode characters excluding whitespace, control characters, and
+ // symbols.
+ nameRegexp = regexp.MustCompile(`^[^\pZ\pC\pS]+$`)
+)
+
+var _ fmt.Stringer = Type{}
+
+// Type is the component type as it is used in the config.
+type Type struct {
+ name string
+}
+
+// String returns the string representation of the type.
+func (t Type) String() string {
+ return t.name
+}
+
+// MarshalText returns the Type name.
+func (t Type) MarshalText() ([]byte, error) {
+ return []byte(t.name), nil
+}
+
+// NewType creates a type. It returns an error if the type is invalid.
+// A type must
+// - have at least one character,
+// - start with an ASCII alphabetic character and
+// - contain only ASCII alphanumeric characters and '_'.
+func NewType(ty string) (Type, error) {
+ if len(ty) == 0 {
+ return Type{}, errors.New("id must not be empty")
+ }
+ if !typeRegexp.MatchString(ty) {
+ return Type{}, fmt.Errorf("invalid character(s) in type %q", ty)
+ }
+ return Type{name: ty}, nil
+}
+
+// MustNewType creates a type. It panics if the type is invalid.
+// A type must
+// - have at least one character,
+// - start with an ASCII alphabetic character and
+// - contain only ASCII alphanumeric characters and '_'.
+func MustNewType(strType string) Type {
+ ty, err := NewType(strType)
+ if err != nil {
+ panic(err)
+ }
+ return ty
+}
+
+// ID represents the identity for a component. It combines two values:
+// * type - the Type of the component.
+// * name - the name of that component.
+// The component ID (the combination of type and name) is unique for a given component.Kind.
+type ID struct {
+ typeVal Type `mapstructure:"-"`
+ nameVal string `mapstructure:"-"`
+}
+
+// NewID returns a new ID with the given Type and empty name.
+func NewID(typeVal Type) ID {
+ return ID{typeVal: typeVal}
+}
+
+// MustNewID builds a Type and returns a new ID with the given Type and empty name.
+// This is equivalent to NewID(MustNewType(typeVal)).
+// See MustNewType to check the valid values of typeVal.
+func MustNewID(typeVal string) ID {
+ return NewID(MustNewType(typeVal))
+}
+
+// NewIDWithName returns a new ID with the given Type and name.
+func NewIDWithName(typeVal Type, nameVal string) ID {
+ return ID{typeVal: typeVal, nameVal: nameVal}
+}
+
+// MustNewIDWithName builds a Type and returns a new ID with the given Type and name.
+// This is equivalent to NewIDWithName(MustNewType(typeVal), nameVal).
+// See MustNewType to check the valid values of typeVal.
+func MustNewIDWithName(typeVal string, nameVal string) ID {
+ return NewIDWithName(MustNewType(typeVal), nameVal)
+}
+
+// Type returns the type of the component.
+func (id ID) Type() Type {
+ return id.typeVal
+}
+
+// Name returns the custom name of the component.
+func (id ID) Name() string {
+ return id.nameVal
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// This marshals the type and name as one string in the config.
+func (id ID) MarshalText() (text []byte, err error) {
+ return []byte(id.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (id *ID) UnmarshalText(text []byte) error {
+ idStr := string(text)
+ items := strings.SplitN(idStr, typeAndNameSeparator, 2)
+ var typeStr, nameStr string
+ if len(items) >= 1 {
+ typeStr = strings.TrimSpace(items[0])
+ }
+
+ if len(items) == 1 && typeStr == "" {
+ return errors.New("id must not be empty")
+ }
+
+ if typeStr == "" {
+ return fmt.Errorf("in %q id: the part before %s should not be empty", idStr, typeAndNameSeparator)
+ }
+
+ if len(items) > 1 {
+ // "name" part is present.
+ nameStr = strings.TrimSpace(items[1])
+ if nameStr == "" {
+ return fmt.Errorf("in %q id: the part after %s should not be empty", idStr, typeAndNameSeparator)
+ }
+ if err := validateName(nameStr); err != nil {
+ return fmt.Errorf("in %q id: %w", idStr, err)
+ }
+ }
+
+ var err error
+ if id.typeVal, err = NewType(typeStr); err != nil {
+ return fmt.Errorf("in %q id: %w", idStr, err)
+ }
+ id.nameVal = nameStr
+
+ return nil
+}
+
+// String returns the ID string representation in "type[/name]" format.
+func (id ID) String() string {
+ if id.nameVal == "" {
+ return id.typeVal.String()
+ }
+
+ return id.typeVal.String() + typeAndNameSeparator + id.nameVal
+}
+
+func validateName(nameStr string) error {
+ if len(nameStr) > 1024 {
+ return fmt.Errorf("name %q is longer than 1024 characters (%d characters)", nameStr, len(nameStr))
+ }
+ if !nameRegexp.MatchString(nameStr) {
+ return fmt.Errorf("invalid character(s) in name %q", nameStr)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/component/telemetry.go b/vendor/go.opentelemetry.io/collector/component/telemetry.go
new file mode 100644
index 00000000000..359562e5f92
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/component/telemetry.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package component // import "go.opentelemetry.io/collector/component"
+
+import (
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+
+ "go.opentelemetry.io/collector/config/configtelemetry"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// TelemetrySettings provides components with APIs to report telemetry.
+type TelemetrySettings struct {
+ // Logger that the factory can use during creation and can pass to the created
+ // component to be used later as well.
+ Logger *zap.Logger
+
+ // TracerProvider that the factory can pass to other instrumented third-party libraries.
+ TracerProvider trace.TracerProvider
+
+ // MeterProvider that the factory can pass to other instrumented third-party libraries.
+ MeterProvider metric.MeterProvider
+
+ // MetricsLevel represents the configuration value set when the collector
+ // is configured. Components may use this level to decide whether it is
+ // appropriate to avoid computationally expensive calculations.
+ MetricsLevel configtelemetry.Level + + // Resource contains the resource attributes for the collector's telemetry. + Resource pcommon.Resource +} diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/LICENSE b/vendor/go.opentelemetry.io/collector/config/configtelemetry/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/configtelemetry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/Makefile b/vendor/go.opentelemetry.io/collector/config/configtelemetry/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/config/configtelemetry/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/configtelemetry.go b/vendor/go.opentelemetry.io/collector/config/configtelemetry/configtelemetry.go
new file mode 100644
index 00000000000..b8c0967c905
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/config/configtelemetry/configtelemetry.go
@@ -0,0 +1,73 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package configtelemetry // import "go.opentelemetry.io/collector/config/configtelemetry"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+const (
+ // LevelNone indicates that no telemetry data should be collected.
+ LevelNone Level = iota - 1
+ // LevelBasic is the recommended level; it covers the basics of the service telemetry.
+ LevelBasic
+ // LevelNormal adds some other indicators on top of basic.
+ LevelNormal
+ // LevelDetailed adds dimensions and views to the previous levels.
+ LevelDetailed
+
+ levelNoneStr = "None"
+ levelBasicStr = "Basic"
+ levelNormalStr = "Normal"
+ levelDetailedStr = "Detailed"
+)
+
+// Level is the level of internal telemetry (metrics, logs, traces about the component itself)
+// that every component should generate.
+type Level int32
+
+func (l Level) String() string {
+ switch l {
+ case LevelNone:
+ return levelNoneStr
+ case LevelBasic:
+ return levelBasicStr
+ case LevelNormal:
+ return levelNormalStr
+ case LevelDetailed:
+ return levelDetailedStr
+ }
+ return ""
+}
+
+// MarshalText marshals Level to text.
+func (l Level) MarshalText() (text []byte, err error) {
+ return []byte(l.String()), nil
+}
+
+// UnmarshalText unmarshals text to a Level.
+func (l *Level) UnmarshalText(text []byte) error {
+ if l == nil {
+ return errors.New("cannot unmarshal to a nil *Level")
+ }
+
+ str := strings.ToLower(string(text))
+ switch str {
+ case strings.ToLower(levelNoneStr):
+ *l = LevelNone
+ return nil
+ case strings.ToLower(levelBasicStr):
+ *l = LevelBasic
+ return nil
+ case strings.ToLower(levelNormalStr):
+ *l = LevelNormal
+ return nil
+ case strings.ToLower(levelDetailedStr):
+ *l = LevelDetailed
+ return nil
+ }
+ return fmt.Errorf("unknown metrics level %q", str)
+}
diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go b/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go
new file mode 100644
index 00000000000..646aeb2d7c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package configtelemetry defines the various telemetry levels for configuration.
+// It gives every component access to the configured telemetry level
+// so that metrics are enabled only when necessary.
+//
+// This document provides guidance on which telemetry level to adopt for Collector metrics.
+// When adopting a telemetry level, component authors are expected to rely on this guidance to
+// justify their choice of telemetry level.
+//
+// 1. configtelemetry.None
+//
+// No telemetry data is recorded.
+//
+// 2. configtelemetry.Basic
+//
+// Telemetry associated with this level provides essential coverage of the collector telemetry.
+// It should only be used for internal collector telemetry generated by the collector core API. Components outside of
+// the core API MUST NOT record additional telemetry at this level.
+//
+// 3. configtelemetry.Normal
+//
+// Telemetry associated with this level provides complete coverage of the collector telemetry.
+// It should be the default for component authors.
+//
+// Component authors using this telemetry level can use this guidance:
+//
+// - The signals associated with this level must control cardinality.
+// It is acceptable at this level for cardinality to scale linearly with the monitored resources.
+//
+// - The signals associated with this level must represent a controlled data volume. Examples follow:
+//
+// a. A max cardinality (total possible combinations of dimension values) for a given metric of at most 100.
+//
+// b. At most 5 spans actively recording simultaneously per active request.
+//
+// This is the default level recommended when running the Collector.
+//
+// 4. configtelemetry.Detailed
+//
+// Telemetry associated with this level provides complete coverage of the collector telemetry.
+//
+// The signals associated with this level may exhibit high cardinality and/or high dimensionality.
+//
+// There is no limit on data volume.
+package configtelemetry // import "go.opentelemetry.io/collector/config/configtelemetry"
diff --git a/vendor/go.opentelemetry.io/collector/consumer/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/consumer/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
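[Editorial aside, not part of the patch: the vendored component and configtelemetry packages added above carry most of their behavior in a small exported surface, so a short illustration may help reviewers. The sketch below is hypothetical and uses only identifiers defined in the files in this diff (component.ID, ID.UnmarshalText, configtelemetry.Level, Level.UnmarshalText); the "otlp/primary" id string and the gating decision are invented example values, not anything prescribed by the patch.]

// sketch.go - illustrative use of the vendored APIs above (assumptions noted in the comments).
package main

import (
 "fmt"

 "go.opentelemetry.io/collector/component"
 "go.opentelemetry.io/collector/config/configtelemetry"
)

func main() {
 // ID.UnmarshalText parses the "type[/name]" form used in collector configs.
 // "otlp/primary" is a hypothetical example id.
 var id component.ID
 if err := id.UnmarshalText([]byte("otlp/primary")); err != nil {
 panic(err)
 }
 fmt.Println(id.Type(), id.Name()) // prints: otlp primary

 // Level.UnmarshalText matches level names case-insensitively.
 var lvl configtelemetry.Level
 if err := lvl.UnmarshalText([]byte("detailed")); err != nil {
 panic(err)
 }
 // A component could gate expensive instrumentation on the configured level,
 // for example the level carried in TelemetrySettings.MetricsLevel.
 if lvl >= configtelemetry.LevelDetailed {
 fmt.Println("high-cardinality metrics enabled")
 }
}

[End of editorial aside.]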
diff --git a/vendor/go.opentelemetry.io/collector/consumer/Makefile b/vendor/go.opentelemetry.io/collector/consumer/Makefile
new file mode 100644
index 00000000000..39734bfaebb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/consumer/Makefile
@@ -0,0 +1 @@
+include ../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumer.go b/vendor/go.opentelemetry.io/collector/consumer/consumer.go
new file mode 100644
index 00000000000..b1b588a85c0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/consumer/consumer.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package consumer // import "go.opentelemetry.io/collector/consumer"
+
+import (
+ "errors"
+
+ "go.opentelemetry.io/collector/consumer/internal"
+)
+
+// Capabilities describes the capabilities of a Processor.
+type Capabilities = internal.Capabilities
+
+var errNilFunc = errors.New("nil consumer func")
+
+// Option to construct new consumers.
+type Option = internal.Option
+
+// WithCapabilities overrides the default Capabilities function for a processor.
+// The default Capabilities function reports that the consumer does not mutate data
+// (MutatesData is false).
+func WithCapabilities(capabilities Capabilities) Option {
+ return internal.OptionFunc(func(o *internal.BaseImpl) {
+ o.Cap = capabilities
+ })
+}
diff --git a/vendor/go.opentelemetry.io/collector/consumer/doc.go b/vendor/go.opentelemetry.io/collector/consumer/doc.go
new file mode 100644
index 00000000000..b2ea0535b79
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/consumer/doc.go
@@ -0,0 +1,5 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package consumer contains interfaces that receive and process data.
+package consumer // import "go.opentelemetry.io/collector/consumer"
diff --git a/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go b/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go
new file mode 100644
index 00000000000..45c90dd4341
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/consumer/internal"
+
+// Capabilities describes the capabilities of a Processor.
+type Capabilities struct {
+ // MutatesData is set to true if the Consume* function of the
+ // processor modifies the input Traces, Logs or Metrics argument.
+ // Processors which modify the input data MUST set this flag to true. If the processor
+ // does not modify the data it MUST set this flag to false. If the processor creates
+ // a copy of the data before modifying then this flag can be safely set to false.
+ MutatesData bool
+}
+
+type BaseConsumer interface {
+ Capabilities() Capabilities
+}
+
+type BaseImpl struct {
+ Cap Capabilities
+}
+
+// Option to construct new consumers.
+type Option interface { + apply(*BaseImpl) +} + +type OptionFunc func(*BaseImpl) + +func (of OptionFunc) apply(e *BaseImpl) { + of(e) +} + +// Capabilities returns the capabilities of the component +func (bs BaseImpl) Capabilities() Capabilities { + return bs.Cap +} + +func NewBaseImpl(options ...Option) *BaseImpl { + bs := &BaseImpl{ + Cap: Capabilities{MutatesData: false}, + } + + for _, op := range options { + op.apply(bs) + } + + return bs +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/logs.go b/vendor/go.opentelemetry.io/collector/consumer/logs.go new file mode 100644 index 00000000000..15166ef1196 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/logs.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package consumer // import "go.opentelemetry.io/collector/consumer" + +import ( + "context" + + "go.opentelemetry.io/collector/consumer/internal" + "go.opentelemetry.io/collector/pdata/plog" +) + +// Logs is an interface that receives plog.Logs, processes it +// as needed, and sends it to the next processing node if any or to the destination. +type Logs interface { + internal.BaseConsumer + // ConsumeLogs receives plog.Logs for consumption. + ConsumeLogs(ctx context.Context, ld plog.Logs) error +} + +// ConsumeLogsFunc is a helper function that is similar to ConsumeLogs. +type ConsumeLogsFunc func(ctx context.Context, ld plog.Logs) error + +// ConsumeLogs calls f(ctx, ld). +func (f ConsumeLogsFunc) ConsumeLogs(ctx context.Context, ld plog.Logs) error { + return f(ctx, ld) +} + +type baseLogs struct { + *internal.BaseImpl + ConsumeLogsFunc +} + +// NewLogs returns a Logs configured with the provided options. +func NewLogs(consume ConsumeLogsFunc, options ...Option) (Logs, error) { + if consume == nil { + return nil, errNilFunc + } + return &baseLogs{ + BaseImpl: internal.NewBaseImpl(options...), + ConsumeLogsFunc: consume, + }, nil +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/metrics.go b/vendor/go.opentelemetry.io/collector/consumer/metrics.go new file mode 100644 index 00000000000..47897f9363a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/metrics.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package consumer // import "go.opentelemetry.io/collector/consumer" + +import ( + "context" + + "go.opentelemetry.io/collector/consumer/internal" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +// Metrics is an interface that receives pmetric.Metrics, processes it +// as needed, and sends it to the next processing node if any or to the destination. +type Metrics interface { + internal.BaseConsumer + // ConsumeMetrics receives pmetric.Metrics for consumption. + ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error +} + +// ConsumeMetricsFunc is a helper function that is similar to ConsumeMetrics. +type ConsumeMetricsFunc func(ctx context.Context, md pmetric.Metrics) error + +// ConsumeMetrics calls f(ctx, md). +func (f ConsumeMetricsFunc) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + return f(ctx, md) +} + +type baseMetrics struct { + *internal.BaseImpl + ConsumeMetricsFunc +} + +// NewMetrics returns a Metrics configured with the provided options. 
+func NewMetrics(consume ConsumeMetricsFunc, options ...Option) (Metrics, error) {
+ if consume == nil {
+ return nil, errNilFunc
+ }
+ return &baseMetrics{
+ BaseImpl: internal.NewBaseImpl(options...),
+ ConsumeMetricsFunc: consume,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/consumer/traces.go b/vendor/go.opentelemetry.io/collector/consumer/traces.go
new file mode 100644
index 00000000000..60df2d04536
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/consumer/traces.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package consumer // import "go.opentelemetry.io/collector/consumer"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/consumer/internal"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+)
+
+// Traces is an interface that receives ptrace.Traces, processes it
+// as needed, and sends it to the next processing node if any or to the destination.
+type Traces interface {
+ internal.BaseConsumer
+ // ConsumeTraces receives ptrace.Traces for consumption.
+ ConsumeTraces(ctx context.Context, td ptrace.Traces) error
+}
+
+// ConsumeTracesFunc is a helper function that is similar to ConsumeTraces.
+type ConsumeTracesFunc func(ctx context.Context, td ptrace.Traces) error
+
+// ConsumeTraces calls f(ctx, td).
+func (f ConsumeTracesFunc) ConsumeTraces(ctx context.Context, td ptrace.Traces) error {
+ return f(ctx, td)
+}
+
+type baseTraces struct {
+ *internal.BaseImpl
+ ConsumeTracesFunc
+}
+
+// NewTraces returns a Traces configured with the provided options.
+func NewTraces(consume ConsumeTracesFunc, options ...Option) (Traces, error) {
+ if consume == nil {
+ return nil, errNilFunc
+ }
+ return &baseTraces{
+ BaseImpl: internal.NewBaseImpl(options...),
+ ConsumeTracesFunc: consume,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go
index 65b11fbe286..1fb0f2b6857 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go
@@ -228,7 +228,8 @@ type ResourceLogs struct {
 // A list of ScopeLogs that originate from a resource.
 ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"`
 // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. To learn more about Schema URL see
+ // is recorded in. Notably, the last part of the URL path is the version number of the
+ // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
 // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
 // This schema_url applies to the data in the "resource" field. It does not apply
 // to the data in the "scope_logs" field which have their own schema_url field.
 SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
@@ -305,7 +306,8 @@ type ScopeLogs struct {
 // A list of log records.
 LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"`
 // The Schema URL, if known. This is the identifier of the Schema that the log data
- // is recorded in. To learn more about Schema URL see
+ // is recorded in. Notably, the last part of the URL path is the version number of the
+ // schema: http[s]://server[:port]/path/<version>.
To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all logs in the "logs" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` @@ -434,6 +436,19 @@ type LogRecord struct { // - the field is not present, // - the field contains an invalid value. SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` + // A unique identifier of event category/type. + // All events with the same event_name are expected to conform to the same + // schema for both their attributes and their body. + // + // Recommended to be fully qualified and short (no longer than 256 characters). + // + // Presence of event_name on the log record identifies this record + // as an event. + // + // [Optional]. + // + // Status: [Development] + EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"` } func (m *LogRecord) Reset() { *m = LogRecord{} } @@ -525,6 +540,13 @@ func (m *LogRecord) GetFlags() uint32 { return 0 } +func (m *LogRecord) GetEventName() string { + if m != nil { + return m.EventName + } + return "" +} + func init() { proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value) proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value) @@ -539,67 +561,68 @@ func init() { } var fileDescriptor_d1c030a3ec7e961e = []byte{ - // 951 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xcf, 0x6e, 0xea, 0x46, - 0x14, 0xc6, 0x31, 0xe1, 0xef, 0x84, 0x70, 0xa7, 0x73, 0x49, 0xae, 0x9b, 0xa8, 0x84, 0xa6, 0x55, - 0x4a, 0x53, 0x09, 0x14, 0xa0, 0xd2, 0xed, 0xae, 0x26, 0x98, 0x88, 0x86, 0x40, 0x34, 0x40, 0xaa, - 0xdc, 0x56, 0xb2, 0x0c, 0x9e, 0x52, 0x4b, 0xc6, 0x63, 0xd9, 0x03, 0x4a, 0xf6, 0x7d, 0x80, 0x3e, - 0x41, 0xb7, 0x95, 0xfa, 0x1a, 0xed, 0xe2, 0x2e, 0xef, 0xb2, 0xea, 0x22, 0xaa, 0x92, 0x4d, 0xdf, - 0xa2, 0xd5, 0x0c, 0x86, 0x90, 0xd4, 0x4e, 0x6e, 0x56, 0x99, 0x39, 0xbf, 0xef, 0x7c, 0xe7, 0x8c, - 0x4f, 0x3c, 0x18, 0xec, 0x53, 0x87, 0xd8, 0x8c, 0x58, 0x64, 0x42, 0x98, 0x7b, 0x55, 0x76, 0x5c, - 0xca, 0x68, 0xd9, 0xa2, 0x63, 0xaf, 0x3c, 0x3b, 0x14, 0x7f, 0x4b, 0x22, 0x84, 0x76, 0xee, 0xe9, - 0xe6, 0xc1, 0x92, 0xe0, 0xb3, 0xc3, 0xed, 0xdc, 0x98, 0x8e, 0xe9, 0x3c, 0x95, 0xaf, 0xe6, 0x74, - 0xfb, 0x20, 0xc8, 0x7a, 0x44, 0x27, 0x13, 0x6a, 0x73, 0xf3, 0xf9, 0xca, 0xd7, 0x96, 0x82, 0xb4, - 0x2e, 0xf1, 0xe8, 0xd4, 0x1d, 0x11, 0xae, 0x5e, 0xac, 0xe7, 0xfa, 0xbd, 0x37, 0x20, 0xd5, 0xa6, - 0x63, 0xaf, 0xa1, 0x33, 0x1d, 0x75, 0xc0, 0xc6, 0x82, 0x6a, 0xbc, 0x23, 0x59, 0x2a, 0xac, 0x15, - 0xd7, 0x2b, 0x9f, 0x97, 0x1e, 0x69, 0xb9, 0x84, 0xfd, 0x0c, 0xee, 0x82, 0x33, 0xee, 0xca, 0x6e, - 0xef, 0x97, 0x28, 0xc8, 0xac, 0x62, 0xf4, 0x1d, 0xd8, 0x34, 0x88, 0xe3, 0x92, 0x91, 0xce, 0x88, - 0xa1, 0x79, 0x23, 0xea, 0xf8, 0x85, 0xfe, 0x49, 0x8a, 0x4a, 0xfb, 0x8f, 0x56, 0xea, 0x71, 0xbd, - 0x28, 0xf3, 0xf2, 0xce, 0x65, 0x19, 0x44, 0x27, 0x20, 0xb5, 0xa8, 0x2e, 0x4b, 0x05, 0x29, 0xb4, - 0xf1, 0xe5, 0x03, 0x58, 0x69, 0xbe, 0x1e, 0x7b, 0x7b, 0xbd, 0x1b, 0xc1, 0x4b, 0x03, 0xa4, 0x02, - 0xb0, 0xd2, 0x5e, 0xf4, 0x59, 0xdd, 0xa5, 0xbd, 0x65, 0x4f, 0x1f, 0x71, 0x9b, 0x1f, 0xc9, 0x44, - 0xd7, 0xa6, 0xae, 0x25, 0xaf, 0x15, 0xa4, 0x62, 0x9a, 0x63, 0x1e, 0x19, 0xb8, 0xd6, 
0xde, 0x1f, - 0x12, 0x48, 0xdf, 0x1d, 0xa0, 0x0b, 0xe2, 0x22, 0xd3, 0xef, 0xbe, 0x1a, 0x58, 0xce, 0x1f, 0xf6, - 0xec, 0xb0, 0xd4, 0xb2, 0x3d, 0xe6, 0x4e, 0x27, 0xc4, 0x66, 0x3a, 0x33, 0xa9, 0x2d, 0x7c, 0xfc, - 0x73, 0xcc, 0x7d, 0xd0, 0x31, 0x58, 0xb7, 0xe8, 0x58, 0x73, 0xc9, 0x88, 0xba, 0xc6, 0xfb, 0x9d, - 0xa2, 0x4d, 0xc7, 0x58, 0xc8, 0x31, 0xb0, 0x16, 0xcb, 0x27, 0x8f, 0xf1, 0x53, 0x1c, 0xa4, 0x97, - 0x89, 0xe8, 0x53, 0x90, 0x65, 0xe6, 0x84, 0x68, 0x53, 0xdb, 0xbc, 0xd4, 0x6c, 0xdd, 0xa6, 0xe2, - 0x3c, 0x09, 0x9c, 0xe1, 0xd1, 0x81, 0x6d, 0x5e, 0x76, 0x74, 0x9b, 0xa2, 0x2f, 0xc1, 0x2b, 0x3a, - 0xf4, 0x88, 0x3b, 0x23, 0x86, 0xf6, 0x40, 0xbe, 0x2e, 0xe4, 0xb9, 0x05, 0xee, 0xaf, 0xa6, 0xf5, - 0xc1, 0x0b, 0x8f, 0xcc, 0x88, 0x6b, 0xb2, 0x2b, 0xcd, 0x9e, 0x4e, 0x86, 0xc4, 0x95, 0xa3, 0x05, - 0xa9, 0x98, 0xad, 0x7c, 0xf1, 0xf8, 0x70, 0xfc, 0x9c, 0x8e, 0x48, 0xc1, 0x59, 0xef, 0xde, 0x1e, - 0x7d, 0x02, 0x36, 0x96, 0xae, 0x8c, 0x5c, 0x32, 0xff, 0x88, 0x99, 0x45, 0xb0, 0x4f, 0x2e, 0x19, - 0x52, 0x40, 0x6c, 0x48, 0x8d, 0x2b, 0x39, 0x2e, 0xa6, 0xf3, 0xd9, 0x13, 0xd3, 0x51, 0xec, 0xab, - 0x73, 0xdd, 0x9a, 0x2e, 0x26, 0x22, 0x52, 0xd1, 0x29, 0x00, 0x3a, 0x63, 0xae, 0x39, 0x9c, 0x32, - 0xe2, 0xc9, 0x09, 0x31, 0x8f, 0xa7, 0x8c, 0x4e, 0xc8, 0x3d, 0xa3, 0x15, 0x03, 0xf4, 0x1a, 0xc8, - 0x86, 0x4b, 0x1d, 0x87, 0x18, 0xda, 0x5d, 0x54, 0x1b, 0xd1, 0xa9, 0xcd, 0xe4, 0x64, 0x41, 0x2a, - 0x6e, 0xe0, 0x2d, 0x9f, 0x2b, 0x4b, 0x7c, 0xc4, 0x29, 0xca, 0x81, 0xf8, 0x0f, 0x96, 0x3e, 0xf6, - 0xe4, 0x54, 0x41, 0x2a, 0x26, 0xf1, 0x7c, 0x83, 0xbe, 0x07, 0x29, 0xe6, 0xea, 0x23, 0xa2, 0x99, - 0x86, 0x9c, 0x2e, 0x48, 0xc5, 0x4c, 0x5d, 0xe1, 0x35, 0xff, 0xba, 0xde, 0xfd, 0x6a, 0x4c, 0x1f, - 0xb4, 0x69, 0xf2, 0x1b, 0xc8, 0xb2, 0xc8, 0x88, 0x51, 0xb7, 0xec, 0x18, 0x3a, 0xd3, 0xcb, 0xa6, - 0xcd, 0x88, 0x6b, 0xeb, 0x56, 0x99, 0xef, 0x4a, 0x7d, 0xee, 0xd4, 0x6a, 0xe0, 0xa4, 0xb0, 0x6c, - 0x19, 0xe8, 0x02, 0x24, 0x3d, 0x47, 0xb7, 0xb9, 0x39, 0x10, 0xe6, 0x5f, 0xfb, 0xe6, 0xaf, 0x9f, - 0x6f, 0xde, 0x73, 0x74, 0xbb, 0xd5, 0xc0, 0x09, 0x6e, 0xd8, 0x32, 0xbe, 0x89, 0xa5, 0x62, 0x30, - 0x7e, 0xf0, 0x7b, 0x1c, 0x64, 0xef, 0x0f, 0x1a, 0xed, 0x82, 0x9d, 0x9e, 0x7a, 0xae, 0xe2, 0x56, - 0xff, 0x42, 0xeb, 0x0c, 0x4e, 0xeb, 0x2a, 0xd6, 0x06, 0x9d, 0xde, 0x99, 0x7a, 0xd4, 0x6a, 0xb6, - 0xd4, 0x06, 0x8c, 0xa0, 0x0f, 0xc1, 0xe6, 0x43, 0x41, 0x1f, 0x2b, 0x47, 0x2a, 0x94, 0xd0, 0x36, - 0xd8, 0x0a, 0x44, 0x15, 0x18, 0x0d, 0x65, 0x55, 0xb8, 0x16, 0xca, 0x6a, 0x30, 0x16, 0x54, 0xae, - 0xa1, 0xd6, 0x07, 0xc7, 0x30, 0x1e, 0x94, 0x26, 0x50, 0x05, 0x26, 0x42, 0x59, 0x15, 0x26, 0x43, - 0x59, 0x0d, 0xa6, 0x90, 0x0c, 0x72, 0x0f, 0x59, 0xab, 0xd3, 0xec, 0xc2, 0x74, 0x50, 0x23, 0x9c, - 0x54, 0x20, 0x08, 0x43, 0x55, 0xb8, 0x1e, 0x86, 0x6a, 0x30, 0x13, 0x54, 0xea, 0x5b, 0x05, 0x77, - 0xe0, 0x46, 0x50, 0x12, 0x27, 0x15, 0x98, 0x0d, 0x43, 0x55, 0xf8, 0x22, 0x0c, 0xd5, 0x20, 0x0c, - 0x42, 0x2a, 0xc6, 0x5d, 0x0c, 0x3f, 0x08, 0x7a, 0x18, 0x02, 0x55, 0x20, 0x0a, 0x65, 0x55, 0xf8, - 0x32, 0x94, 0xd5, 0x60, 0x2e, 0xa8, 0x5c, 0x53, 0xe9, 0x2b, 0x6d, 0xb8, 0x19, 0x94, 0x26, 0x50, - 0x05, 0x6e, 0x85, 0xb2, 0x2a, 0x7c, 0x15, 0xca, 0x6a, 0x50, 0x3e, 0xb8, 0x00, 0xd9, 0xe5, 0x5d, - 0xda, 0x14, 0xaf, 0xe5, 0x2e, 0xd8, 0x69, 0x77, 0x8f, 0x35, 0xac, 0x1e, 0x75, 0x71, 0x43, 0x6b, - 0xb6, 0x95, 0xe3, 0x9e, 0xd6, 0xe8, 0x6a, 0x9d, 0x6e, 0x5f, 0x1b, 0xf4, 0x54, 0x18, 0x41, 0xfb, - 0xe0, 0xe3, 0xff, 0x09, 0xc4, 0xbf, 0x9c, 0xbf, 0x3e, 0x55, 0x7a, 0x27, 0xf0, 0x5f, 0xa9, 0xfe, - 0xab, 0xf4, 0xf6, 0x26, 0x2f, 0xbd, 0xbb, 0xc9, 0x4b, 0x7f, 0xdf, 0xe4, 0xa5, 0x9f, 0x6f, 0xf3, - 0x91, 0x77, 
0xb7, 0xf9, 0xc8, 0x9f, 0xb7, 0xf9, 0x08, 0xc8, 0x9b, 0xf4, 0xb1, 0x0b, 0xb4, 0xce, - 0xef, 0x77, 0xef, 0x8c, 0x87, 0xce, 0xa4, 0x37, 0xf5, 0x67, 0xbf, 0xb0, 0xf3, 0xef, 0x90, 0x31, - 0xb1, 0x17, 0x5f, 0x44, 0xbf, 0x45, 0x77, 0xba, 0x0e, 0xb1, 0xfb, 0x4b, 0x07, 0xe1, 0xcd, 0x7f, - 0x7e, 0xbc, 0xd2, 0xf9, 0xe1, 0x30, 0x21, 0xf4, 0xd5, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x48, - 0x86, 0x67, 0x14, 0x55, 0x09, 0x00, 0x00, + // 971 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0x41, 0x6f, 0xe2, 0x46, + 0x1b, 0xc7, 0x71, 0x12, 0x02, 0x4c, 0x08, 0x3b, 0xef, 0x2c, 0xc9, 0xfa, 0x4d, 0x54, 0x42, 0xd3, + 0x2a, 0xa5, 0xa9, 0x04, 0x0a, 0x50, 0x69, 0x7b, 0xab, 0x09, 0x26, 0xa2, 0x21, 0x10, 0x0d, 0x90, + 0x2a, 0xdb, 0x4a, 0x96, 0xc1, 0x53, 0x6a, 0xc9, 0xcc, 0x58, 0xf6, 0x80, 0x92, 0x6f, 0xd1, 0x4f, + 0xd0, 0x4b, 0x0f, 0x95, 0xfa, 0x35, 0xda, 0xc3, 0x1e, 0xf7, 0x58, 0xf5, 0xb0, 0xaa, 0x92, 0x4b, + 0xbf, 0x45, 0xab, 0x19, 0x0c, 0x21, 0xa9, 0x9d, 0x34, 0x27, 0x66, 0x9e, 0xdf, 0xff, 0xf9, 0x3f, + 0xcf, 0x78, 0xc6, 0x83, 0xc1, 0x01, 0x73, 0x09, 0xe5, 0xc4, 0x21, 0x63, 0xc2, 0xbd, 0xeb, 0x92, + 0xeb, 0x31, 0xce, 0x4a, 0x0e, 0x1b, 0xf9, 0xa5, 0xe9, 0x91, 0xfc, 0x2d, 0xca, 0x10, 0xda, 0xbd, + 0xa7, 0x9b, 0x05, 0x8b, 0x92, 0x4f, 0x8f, 0x76, 0xb2, 0x23, 0x36, 0x62, 0xb3, 0x54, 0x31, 0x9a, + 0xd1, 0x9d, 0xc3, 0x30, 0xeb, 0x21, 0x1b, 0x8f, 0x19, 0x15, 0xe6, 0xb3, 0x51, 0xa0, 0x2d, 0x86, + 0x69, 0x3d, 0xe2, 0xb3, 0x89, 0x37, 0x24, 0x42, 0x3d, 0x1f, 0xcf, 0xf4, 0xfb, 0x6f, 0x40, 0xb2, + 0xc5, 0x46, 0x7e, 0xdd, 0xe4, 0x26, 0x6a, 0x83, 0xcd, 0x39, 0x35, 0x44, 0x47, 0xaa, 0x92, 0x5f, + 0x2d, 0x6c, 0x94, 0x3f, 0x2d, 0x3e, 0xd2, 0x72, 0x11, 0x07, 0x19, 0xc2, 0x05, 0xa7, 0xbd, 0xa5, + 0xd9, 0xfe, 0x8f, 0x2b, 0x20, 0xbd, 0x8c, 0xd1, 0x37, 0x60, 0xcb, 0x22, 0xae, 0x47, 0x86, 0x26, + 0x27, 0x96, 0xe1, 0x0f, 0x99, 0x1b, 0x14, 0xfa, 0x2b, 0x21, 0x2b, 0x1d, 0x3c, 0x5a, 0xa9, 0x2b, + 0xf4, 0xb2, 0xcc, 0xcb, 0x3b, 0x97, 0x45, 0x10, 0x9d, 0x82, 0xe4, 0xbc, 0xba, 0xaa, 0xe4, 0x95, + 0xc8, 0xc6, 0x17, 0x0f, 0x60, 0xa9, 0xf9, 0xda, 0xda, 0xdb, 0xf7, 0x7b, 0x31, 0xbc, 0x30, 0x40, + 0x3a, 0x00, 0x4b, 0xed, 0xad, 0x3c, 0xab, 0xbb, 0x94, 0xbf, 0xe8, 0xe9, 0x03, 0x61, 0xf3, 0x3d, + 0x19, 0x9b, 0xc6, 0xc4, 0x73, 0xd4, 0xd5, 0xbc, 0x52, 0x48, 0x09, 0x2c, 0x22, 0x7d, 0xcf, 0xd9, + 0xff, 0x4d, 0x01, 0xa9, 0xbb, 0x05, 0x74, 0x40, 0x5c, 0x66, 0x06, 0xdd, 0x57, 0x42, 0xcb, 0x05, + 0x9b, 0x3d, 0x3d, 0x2a, 0x36, 0xa9, 0xcf, 0xbd, 0xc9, 0x98, 0x50, 0x6e, 0x72, 0x9b, 0x51, 0xe9, + 0x13, 0xac, 0x63, 0xe6, 0x83, 0x4e, 0xc0, 0x86, 0xc3, 0x46, 0x86, 0x47, 0x86, 0xcc, 0xb3, 0xfe, + 0xdb, 0x2a, 0x5a, 0x6c, 0x84, 0xa5, 0x1c, 0x03, 0x67, 0x3e, 0x7c, 0x72, 0x19, 0x3f, 0xc5, 0x41, + 0x6a, 0x91, 0x88, 0x3e, 0x06, 0x19, 0x6e, 0x8f, 0x89, 0x31, 0xa1, 0xf6, 0x95, 0x41, 0x4d, 0xca, + 0xe4, 0x7a, 0xd6, 0x71, 0x5a, 0x44, 0xfb, 0xd4, 0xbe, 0x6a, 0x9b, 0x94, 0xa1, 0xcf, 0xc1, 0x2b, + 0x36, 0xf0, 0x89, 0x37, 0x25, 0x96, 0xf1, 0x40, 0xbe, 0x21, 0xe5, 0xd9, 0x39, 0xee, 0x2d, 0xa7, + 0xf5, 0xc0, 0x0b, 0x9f, 0x4c, 0x89, 0x67, 0xf3, 0x6b, 0x83, 0x4e, 0xc6, 0x03, 0xe2, 0xa9, 0x2b, + 0x79, 0xa5, 0x90, 0x29, 0x7f, 0xf6, 0xf8, 0xe6, 0x04, 0x39, 0x6d, 0x99, 0x82, 0x33, 0xfe, 0xbd, + 0x39, 0xfa, 0x08, 0x6c, 0x2e, 0x5c, 0x39, 0xb9, 0xe2, 0xc1, 0x12, 0xd3, 0xf3, 0x60, 0x8f, 0x5c, + 0x71, 0xa4, 0x81, 0xb5, 0x01, 0xb3, 0xae, 0xd5, 0xb8, 0xdc, 0x9d, 0x4f, 0x9e, 0xd8, 0x1d, 0x8d, + 0x5e, 0x5f, 0x98, 0xce, 0x64, 0xbe, 0x23, 0x32, 0x15, 0x9d, 0x01, 0x60, 0x72, 0xee, 0xd9, 0x83, + 0x09, 0x27, 0xbe, 0xba, 0x2e, 0xf7, 0xe3, 
0x29, 0xa3, 0x53, 0x72, 0xcf, 0x68, 0xc9, 0x00, 0xbd, + 0x06, 0xaa, 0xe5, 0x31, 0xd7, 0x25, 0x96, 0x71, 0x17, 0x35, 0x86, 0x6c, 0x42, 0xb9, 0x9a, 0xc8, + 0x2b, 0x85, 0x4d, 0xbc, 0x1d, 0x70, 0x6d, 0x81, 0x8f, 0x05, 0x45, 0x59, 0x10, 0xff, 0xce, 0x31, + 0x47, 0xbe, 0x9a, 0xcc, 0x2b, 0x85, 0x04, 0x9e, 0x4d, 0xd0, 0xb7, 0x20, 0xc9, 0x3d, 0x73, 0x48, + 0x0c, 0xdb, 0x52, 0x53, 0x79, 0xa5, 0x90, 0xae, 0x69, 0xa2, 0xe6, 0x1f, 0xef, 0xf7, 0xbe, 0x18, + 0xb1, 0x07, 0x6d, 0xda, 0xe2, 0x06, 0x72, 0x1c, 0x32, 0xe4, 0xcc, 0x2b, 0xb9, 0x96, 0xc9, 0xcd, + 0x92, 0x4d, 0x39, 0xf1, 0xa8, 0xe9, 0x94, 0xc4, 0xac, 0xd8, 0x13, 0x4e, 0xcd, 0x3a, 0x4e, 0x48, + 0xcb, 0xa6, 0x85, 0x2e, 0x41, 0xc2, 0x77, 0x4d, 0x2a, 0xcc, 0x81, 0x34, 0xff, 0x32, 0x30, 0x7f, + 0xfd, 0x7c, 0xf3, 0xae, 0x6b, 0xd2, 0x66, 0x1d, 0xaf, 0x0b, 0xc3, 0xa6, 0x25, 0xce, 0x27, 0x99, + 0x12, 0xca, 0x0d, 0x6a, 0x8e, 0x89, 0x9a, 0x9e, 0x9d, 0x4f, 0x19, 0x69, 0x9b, 0x63, 0xf2, 0xd5, + 0x5a, 0x72, 0x0d, 0xc6, 0x0f, 0x7f, 0x8d, 0x83, 0xcc, 0xfd, 0x73, 0x80, 0xf6, 0xc0, 0x6e, 0x57, + 0xbf, 0xd0, 0x71, 0xb3, 0x77, 0x69, 0xb4, 0xfb, 0x67, 0x35, 0x1d, 0x1b, 0xfd, 0x76, 0xf7, 0x5c, + 0x3f, 0x6e, 0x36, 0x9a, 0x7a, 0x1d, 0xc6, 0xd0, 0xff, 0xc1, 0xd6, 0x43, 0x41, 0x0f, 0x6b, 0xc7, + 0x3a, 0x54, 0xd0, 0x0e, 0xd8, 0x0e, 0x45, 0x65, 0xb8, 0x12, 0xc9, 0x2a, 0x70, 0x35, 0x92, 0x55, + 0xe1, 0x5a, 0x58, 0xb9, 0xba, 0x5e, 0xeb, 0x9f, 0xc0, 0x78, 0x58, 0x9a, 0x44, 0x65, 0xb8, 0x1e, + 0xc9, 0x2a, 0x30, 0x11, 0xc9, 0xaa, 0x30, 0x89, 0x54, 0x90, 0x7d, 0xc8, 0x9a, 0xed, 0x46, 0x07, + 0xa6, 0xc2, 0x1a, 0x11, 0xa4, 0x0c, 0x41, 0x14, 0xaa, 0xc0, 0x8d, 0x28, 0x54, 0x85, 0xe9, 0xb0, + 0x52, 0x5f, 0x6b, 0xb8, 0x0d, 0x37, 0xc3, 0x92, 0x04, 0x29, 0xc3, 0x4c, 0x14, 0xaa, 0xc0, 0x17, + 0x51, 0xa8, 0x0a, 0x61, 0x18, 0xd2, 0x31, 0xee, 0x60, 0xf8, 0xbf, 0xb0, 0x87, 0x21, 0x51, 0x19, + 0xa2, 0x48, 0x56, 0x81, 0x2f, 0x23, 0x59, 0x15, 0x66, 0xc3, 0xca, 0x35, 0xb4, 0x9e, 0xd6, 0x82, + 0x5b, 0x61, 0x69, 0x12, 0x95, 0xe1, 0x76, 0x24, 0xab, 0xc0, 0x57, 0x91, 0xac, 0x0a, 0xd5, 0xc3, + 0x4b, 0x90, 0x59, 0x5c, 0xb5, 0x0d, 0xf9, 0xd6, 0xee, 0x81, 0xdd, 0x56, 0xe7, 0xc4, 0xc0, 0xfa, + 0x71, 0x07, 0xd7, 0x8d, 0x46, 0x4b, 0x3b, 0xe9, 0x1a, 0xf5, 0x8e, 0xd1, 0xee, 0xf4, 0x8c, 0x7e, + 0x57, 0x87, 0x31, 0x74, 0x00, 0x3e, 0xfc, 0x97, 0x40, 0x1e, 0xb9, 0x60, 0x7c, 0xa6, 0x75, 0x4f, + 0xe1, 0xdf, 0x4a, 0xed, 0x67, 0xe5, 0xed, 0x4d, 0x4e, 0x79, 0x77, 0x93, 0x53, 0xfe, 0xbc, 0xc9, + 0x29, 0x3f, 0xdc, 0xe6, 0x62, 0xef, 0x6e, 0x73, 0xb1, 0xdf, 0x6f, 0x73, 0x31, 0x90, 0xb3, 0xd9, + 0x63, 0xf7, 0x6b, 0x4d, 0x5c, 0xff, 0xfe, 0xb9, 0x08, 0x9d, 0x2b, 0x6f, 0x6a, 0xcf, 0x7e, 0x9f, + 0x67, 0x9f, 0x29, 0x23, 0x42, 0xe7, 0x1f, 0x4c, 0xbf, 0xac, 0xec, 0x76, 0x5c, 0x42, 0x7b, 0x0b, + 0x07, 0xe9, 0x2d, 0xfe, 0x9d, 0xfc, 0xe2, 0xc5, 0xd1, 0x60, 0x5d, 0xea, 0x2b, 0xff, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc9, 0xbc, 0x36, 0x44, 0x74, 0x09, 0x00, 0x00, } func (m *LogsData) Marshal() (dAtA []byte, err error) { @@ -783,6 +806,13 @@ func (m *LogRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.EventName) > 0 { + i -= len(m.EventName) + copy(dAtA[i:], m.EventName) + i = encodeVarintLogs(dAtA, i, uint64(len(m.EventName))) + i-- + dAtA[i] = 0x62 + } if m.ObservedTimeUnixNano != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ObservedTimeUnixNano)) @@ -976,6 +1006,10 @@ func (m *LogRecord) Size() (n int) { if m.ObservedTimeUnixNano != 0 { n += 9 } + l = len(m.EventName) + if l > 0 { + n += 1 + l + sovLogs(uint64(l)) + } return n } @@ -1663,6 +1697,38 @@ func (m *LogRecord) 
Unmarshal(dAtA []byte) error { } m.ObservedTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EventName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogs(dAtA[iNdEx:]) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go index 66143337964..9021e432c52 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go @@ -243,7 +243,8 @@ type ResourceMetrics struct { // A list of metrics that originate from a resource. ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_metrics" field which have their own schema_url field. @@ -320,7 +321,8 @@ type ScopeMetrics struct { // A list of metrics that originate from an instrumentation library. Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all metrics in the "metrics" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` @@ -1106,7 +1108,7 @@ type HistogramDataPoint struct { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram // // Types that are valid to be assigned to Sum_: // *HistogramDataPoint_Sum @@ -1351,7 +1353,7 @@ type ExponentialHistogramDataPoint struct { // events, and is assumed to be monotonic over the values of these events. 
// Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram // // Types that are valid to be assigned to Sum_: // *ExponentialHistogramDataPoint_Sum @@ -1694,7 +1696,7 @@ type SummaryDataPoint struct { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` // (Optional) list of values at different quantiles of the distribution calculated // from the current snapshot. The quantiles must be strictly increasing. diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go index faa8630469c..bafc9d85f09 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go @@ -183,7 +183,8 @@ type ResourceProfiles struct { // A list of ScopeProfiles that originate from a resource. ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_profiles" field which have their own schema_url field. @@ -253,7 +254,8 @@ type ScopeProfiles struct { // A list of Profiles that originate from an instrumentation scope. Profiles []*Profile `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the profile data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all profiles in the "profiles" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` @@ -375,19 +377,6 @@ type Profile struct { // // This field is required. ProfileId go_opentelemetry_io_collector_pdata_internal_data.ProfileID `protobuf:"bytes,17,opt,name=profile_id,json=profileId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.ProfileID" json:"profile_id"` - // attributes is a collection of key/value pairs. 
Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attributes []v11.KeyValue `protobuf:"bytes,18,rep,name=attributes,proto3" json:"attributes"` // dropped_attributes_count is the number of attributes that were discarded. Attributes // can be discarded because their keys are too long or because there are too many // attributes. If this value is 0, then no attributes were dropped. @@ -401,6 +390,20 @@ type Profile struct { // The field is optional, however if it is present then equivalent converted data should be populated in other fields // of this message as far as is practicable. OriginalPayload []byte `protobuf:"bytes,21,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"` + // References to attributes in attribute_table. [optional] + // It is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + AttributeIndices []int32 `protobuf:"varint,22,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` } func (m *Profile) Reset() { *m = Profile{} } @@ -548,13 +551,6 @@ func (m *Profile) GetDefaultSampleTypeStrindex() int32 { return 0 } -func (m *Profile) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - func (m *Profile) GetDroppedAttributesCount() uint32 { if m != nil { return m.DroppedAttributesCount @@ -576,6 +572,13 @@ func (m *Profile) GetOriginalPayload() []byte { return nil } +func (m *Profile) GetAttributeIndices() []int32 { + if m != nil { + return m.AttributeIndices + } + return nil +} + // Represents a mapping between Attribute Keys and Units. type AttributeUnit struct { // Index into string table. @@ -865,84 +868,6 @@ func (*Sample) XXX_OneofWrappers() []interface{} { } } -// Provides additional context for a sample, -// such as thread ID or allocation size, with optional units. 
[deprecated] -type Label struct { - KeyStrindex int32 `protobuf:"varint,1,opt,name=key_strindex,json=keyStrindex,proto3" json:"key_strindex,omitempty"` - // At most one of the following must be present - StrStrindex int32 `protobuf:"varint,2,opt,name=str_strindex,json=strStrindex,proto3" json:"str_strindex,omitempty"` - Num int64 `protobuf:"varint,3,opt,name=num,proto3" json:"num,omitempty"` - // Should only be present when num is present. - // Specifies the units of num. - // Use arbitrary string (for example, "requests") as a custom count unit. - // If no unit is specified, consumer may apply heuristic to deduce the unit. - // Consumers may also interpret units like "bytes" and "kilobytes" as memory - // units and units like "seconds" and "nanoseconds" as time units, - // and apply appropriate unit conversions to these. - NumUnitStrindex int32 `protobuf:"varint,4,opt,name=num_unit_strindex,json=numUnitStrindex,proto3" json:"num_unit_strindex,omitempty"` -} - -func (m *Label) Reset() { *m = Label{} } -func (m *Label) String() string { return proto.CompactTextString(m) } -func (*Label) ProtoMessage() {} -func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{8} -} -func (m *Label) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Label.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Label) XXX_Merge(src proto.Message) { - xxx_messageInfo_Label.Merge(m, src) -} -func (m *Label) XXX_Size() int { - return m.Size() -} -func (m *Label) XXX_DiscardUnknown() { - xxx_messageInfo_Label.DiscardUnknown(m) -} - -var xxx_messageInfo_Label proto.InternalMessageInfo - -func (m *Label) GetKeyStrindex() int32 { - if m != nil { - return m.KeyStrindex - } - return 0 -} - -func (m *Label) GetStrStrindex() int32 { - if m != nil { - return m.StrStrindex - } - return 0 -} - -func (m *Label) GetNum() int64 { - if m != nil { - return m.Num - } - return 0 -} - -func (m *Label) GetNumUnitStrindex() int32 { - if m != nil { - return m.NumUnitStrindex - } - return 0 -} - // Describes the mapping of a binary in memory, including its address range, // file offset, and metadata like build ID type Mapping struct { @@ -969,7 +894,7 @@ func (m *Mapping) Reset() { *m = Mapping{} } func (m *Mapping) String() string { return proto.CompactTextString(m) } func (*Mapping) ProtoMessage() {} func (*Mapping) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{9} + return fileDescriptor_ddd0cf081a2fe76f, []int{8} } func (m *Mapping) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1098,7 +1023,7 @@ func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} func (*Location) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{10} + return fileDescriptor_ddd0cf081a2fe76f, []int{9} } func (m *Location) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1202,7 +1127,7 @@ func (m *Line) Reset() { *m = Line{} } func (m *Line) String() string { return proto.CompactTextString(m) } func (*Line) ProtoMessage() {} func (*Line) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{11} + return fileDescriptor_ddd0cf081a2fe76f, []int{10} } func (m *Line) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -1270,7 +1195,7 @@ func (m *Function) Reset() { *m = Function{} } func (m *Function) String() string { return proto.CompactTextString(m) } func (*Function) ProtoMessage() {} func (*Function) Descriptor() ([]byte, []int) { - return fileDescriptor_ddd0cf081a2fe76f, []int{12} + return fileDescriptor_ddd0cf081a2fe76f, []int{11} } func (m *Function) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1337,7 +1262,6 @@ func init() { proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1development.Link") proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1development.ValueType") proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1development.Sample") - proto.RegisterType((*Label)(nil), "opentelemetry.proto.profiles.v1development.Label") proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1development.Mapping") proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1development.Location") proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1development.Line") @@ -1349,110 +1273,107 @@ func init() { } var fileDescriptor_ddd0cf081a2fe76f = []byte{ - // 1647 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0x5b, 0x6f, 0x1b, 0xc7, - 0x15, 0xd6, 0xf2, 0xce, 0xc3, 0x8b, 0xa8, 0x89, 0xac, 0x6c, 0xd3, 0x5a, 0x66, 0x68, 0xa4, 0x61, - 0x54, 0x44, 0xb2, 0xe4, 0xb4, 0x88, 0xd1, 0x02, 0xad, 0x64, 0x4a, 0x0e, 0x63, 0xea, 0xd2, 0x15, - 0x25, 0xd4, 0x6d, 0x80, 0xed, 0x88, 0x3b, 0xa4, 0xb6, 0xda, 0x9d, 0x5d, 0xec, 0x0c, 0x05, 0x13, - 0xfd, 0x09, 0xcd, 0x43, 0x1f, 0xfb, 0x1b, 0x0a, 0xf4, 0x37, 0xf4, 0x35, 0x8f, 0x46, 0x9f, 0x82, - 0x3e, 0x04, 0x85, 0xfd, 0x92, 0x16, 0xe8, 0x7f, 0x28, 0xe6, 0xb2, 0xcb, 0x4b, 0x29, 0x24, 0xeb, - 0x17, 0x61, 0xe7, 0x9c, 0x6f, 0xbe, 0x39, 0x67, 0xce, 0x65, 0x8e, 0x08, 0x4f, 0x82, 0x90, 0x50, - 0x4e, 0x3c, 0xe2, 0x13, 0x1e, 0x4d, 0x76, 0xc2, 0x28, 0xe0, 0x81, 0xf8, 0x3b, 0x74, 0x3d, 0xc2, - 0x76, 0x6e, 0x77, 0x1d, 0x72, 0x4b, 0xbc, 0x20, 0xf4, 0x09, 0xe5, 0x89, 0x78, 0x5b, 0xa2, 0xd0, - 0xd6, 0xdc, 0x56, 0x25, 0xdc, 0x4e, 0x30, 0x73, 0x5b, 0xdf, 0x5b, 0x1f, 0x05, 0xa3, 0x40, 0x91, - 0x8b, 0x2f, 0x05, 0x7e, 0x6f, 0x6b, 0xd9, 0xe1, 0x83, 0xc0, 0xf7, 0x03, 0xba, 0x73, 0xbb, 0xab, - 0xbf, 0x34, 0x76, 0x7b, 0x19, 0x36, 0x22, 0x2c, 0x18, 0x47, 0x03, 0x22, 0xd0, 0xf1, 0xb7, 0xc2, - 0xb7, 0x26, 0x50, 0x3d, 0xd3, 0xb6, 0x74, 0x30, 0xc7, 0xc8, 0x85, 0xb5, 0x18, 0x61, 0xc7, 0x46, - 0x9a, 0x46, 0x33, 0xdb, 0xae, 0xec, 0xfd, 0x62, 0xfb, 0xfb, 0x7b, 0xb2, 0x6d, 0x69, 0x92, 0x98, - 0xdc, 0x6a, 0x44, 0x0b, 0x92, 0xd6, 0xb7, 0x06, 0x34, 0x16, 0x61, 0xe8, 0x39, 0x94, 0x62, 0xa0, - 0x69, 0x34, 0x8d, 0x76, 0x65, 0xef, 0xa3, 0xa5, 0xc7, 0x26, 0x6e, 0xdc, 0xee, 0x26, 0x67, 0x1d, - 0xe4, 0xbe, 0xfa, 0xe6, 0xc1, 0x8a, 0x95, 0x10, 0xa0, 0xdf, 0x43, 0x9d, 0x0d, 0x82, 0x70, 0xc6, - 0x93, 0x8c, 0xf4, 0xe4, 0x49, 0x1a, 0x4f, 0xce, 0x05, 0x43, 0xe2, 0x46, 0x8d, 0xcd, 0x2e, 0xd1, - 0x7d, 0x00, 0x36, 0xb8, 0x26, 0x3e, 0xb6, 0xc7, 0x91, 0x67, 0x66, 0x9b, 0x46, 0xbb, 0x6c, 0x95, - 0x95, 0xe4, 0x22, 0xf2, 0x3e, 0x2f, 0x94, 0xbe, 0x2d, 0x36, 0xfe, 0x5d, 0x6c, 0xbd, 0x32, 0xa0, - 0x36, 0xc7, 0x83, 0x4e, 0x21, 0x2f, 0x99, 0xb4, 0x93, 0x8f, 0x97, 0x5a, 0xa4, 0x23, 0x7b, 0xbb, - 0xbb, 0xdd, 0xa5, 0x8c, 0x47, 0x63, 0x61, 0x0f, 0xe6, 0x6e, 0x40, 0x25, 0x97, 0x76, 0x57, 0xf1, - 0xa0, 0x53, 0x28, 0x2d, 0x78, 0xf9, 0x38, 0x8d, 0x97, 0xda, 0x30, 0x2b, 0x21, 0xf9, 0x0e, 0xd7, - 0x5a, 0x7f, 0xa9, 0x40, 0x51, 0x6f, 0x42, 0x97, 0x50, 0x61, 
0xd8, 0x0f, 0x3d, 0x62, 0xf3, 0x89, - 0x74, 0x49, 0x1c, 0xff, 0xd3, 0x34, 0xc7, 0x5f, 0x62, 0x6f, 0x4c, 0xfa, 0x93, 0x90, 0x58, 0xa0, - 0x98, 0xc4, 0x37, 0xfa, 0x1c, 0x0a, 0x6a, 0xa5, 0x3d, 0xda, 0x4b, 0x15, 0x37, 0xb9, 0xd3, 0xd2, - 0x0c, 0xe8, 0x37, 0x50, 0xf3, 0x71, 0x18, 0xba, 0x74, 0x64, 0x73, 0x7c, 0xe5, 0x11, 0x33, 0x9b, - 0xfe, 0x92, 0x8e, 0x15, 0x81, 0x55, 0xd5, 0x4c, 0x7d, 0x41, 0x84, 0x7e, 0x07, 0x75, 0x2f, 0x18, - 0xc8, 0xb8, 0x68, 0xea, 0x9c, 0xa4, 0xfe, 0x24, 0x0d, 0x75, 0x4f, 0x33, 0x58, 0xb5, 0x98, 0x4b, - 0x91, 0x7f, 0x04, 0x8d, 0x84, 0xdc, 0xa5, 0x8e, 0x3b, 0x20, 0xcc, 0xcc, 0x37, 0xb3, 0xed, 0xbc, - 0xb5, 0x1a, 0xcb, 0xbb, 0x4a, 0x2c, 0xec, 0x18, 0x8e, 0xe9, 0x60, 0xc6, 0x8e, 0x42, 0x7a, 0x3b, - 0x8e, 0x34, 0x83, 0x55, 0x8b, 0xb9, 0x94, 0x1d, 0x97, 0xb0, 0x8a, 0x39, 0x8f, 0xdc, 0xab, 0x31, - 0x27, 0x9a, 0xbd, 0x28, 0xd9, 0x3f, 0xfc, 0x8e, 0xcc, 0x7d, 0x4e, 0x26, 0x32, 0xb8, 0x3a, 0x5b, - 0xeb, 0x09, 0x8b, 0xe2, 0xbd, 0x9a, 0xe5, 0x1d, 0x53, 0x97, 0x33, 0xb3, 0x94, 0xbe, 0x46, 0xf7, - 0x63, 0x8a, 0x0b, 0xea, 0xf2, 0x99, 0x33, 0xc4, 0x52, 0xd4, 0x1a, 0x78, 0x2e, 0xbd, 0xd1, 0x66, - 0x97, 0x25, 0xfd, 0xa3, 0x54, 0xc1, 0x71, 0xe9, 0x8d, 0x55, 0x16, 0x1c, 0xca, 0xe8, 0xf7, 0xa1, - 0xca, 0x78, 0x34, 0x4d, 0x25, 0x68, 0x66, 0xdb, 0x65, 0xab, 0xa2, 0x64, 0x0a, 0x72, 0x1f, 0x80, - 0xbb, 0x3e, 0xb1, 0x29, 0xa6, 0x01, 0x33, 0x2b, 0x4d, 0xa3, 0x9d, 0xb5, 0xca, 0x42, 0x72, 0x22, - 0x04, 0xe8, 0x03, 0xa8, 0x3b, 0xe3, 0x48, 0x85, 0x55, 0x41, 0xaa, 0x12, 0x52, 0x8b, 0xa5, 0x0a, - 0xf6, 0x05, 0x54, 0x42, 0x12, 0xb9, 0x81, 0xa3, 0x0a, 0xab, 0x26, 0x7b, 0xc5, 0xdb, 0x15, 0x96, - 0xbe, 0x7f, 0x50, 0x7c, 0xb2, 0xbc, 0x36, 0xa0, 0xa0, 0x56, 0x66, 0x5d, 0x1e, 0xae, 0x57, 0xe8, - 0x63, 0x40, 0x22, 0x7e, 0x84, 0x72, 0x5b, 0xba, 0xa4, 0xb2, 0x6e, 0x55, 0x66, 0xdd, 0x9a, 0xd6, - 0x9c, 0x27, 0x0a, 0xf4, 0x4b, 0xf8, 0x91, 0x43, 0x86, 0x78, 0xec, 0x71, 0x7b, 0xa6, 0x0b, 0xe8, - 0xad, 0xe4, 0xa5, 0xd9, 0x68, 0x1a, 0xed, 0xbc, 0xf5, 0x03, 0x8d, 0x39, 0x4f, 0xca, 0xfb, 0x5c, - 0x03, 0xd0, 0x15, 0x80, 0xb6, 0xde, 0x76, 0x1d, 0x73, 0xad, 0x69, 0xb4, 0xab, 0x07, 0x4f, 0x85, - 0xb5, 0xff, 0xfc, 0xe6, 0xc1, 0xcf, 0x47, 0xc1, 0x82, 0xbb, 0xae, 0x78, 0xfb, 0x3c, 0x8f, 0x0c, - 0x78, 0x10, 0xed, 0x84, 0x0e, 0xe6, 0x78, 0xc7, 0xa5, 0x9c, 0x44, 0x14, 0x7b, 0x3b, 0x62, 0x15, - 0xb7, 0xb2, 0x6e, 0xc7, 0x2a, 0x6b, 0xda, 0xae, 0x83, 0x8e, 0x01, 0x92, 0xac, 0x60, 0x26, 0x7a, - 0x9b, 0xd4, 0x9d, 0x21, 0x40, 0x9f, 0x82, 0xe9, 0x44, 0x41, 0x18, 0x12, 0xc7, 0x9e, 0x4a, 0xed, - 0x41, 0x30, 0xa6, 0xdc, 0x7c, 0xa7, 0x69, 0xb4, 0x6b, 0xd6, 0x86, 0xd6, 0x27, 0xa9, 0xc9, 0x9e, - 0x0a, 0x2d, 0xfa, 0x19, 0xbc, 0x1b, 0x44, 0xee, 0xc8, 0xa5, 0xd8, 0xb3, 0x43, 0x3c, 0xf1, 0x02, - 0xec, 0xd8, 0xc3, 0x20, 0xf2, 0x31, 0x37, 0xd7, 0x65, 0x8f, 0xbd, 0x17, 0xab, 0xcf, 0x94, 0xf6, - 0x48, 0x2a, 0x45, 0x23, 0x58, 0xdc, 0x67, 0xde, 0x13, 0x57, 0x65, 0xad, 0x2e, 0x6c, 0x68, 0xfd, - 0x01, 0x6a, 0x73, 0x05, 0x81, 0x3e, 0x81, 0x8d, 0x69, 0x91, 0xdd, 0x90, 0xc9, 0x34, 0x36, 0x86, - 0x8c, 0xcd, 0x7a, 0xa2, 0x7d, 0x4e, 0x26, 0x49, 0x58, 0x1e, 0x42, 0x4d, 0x14, 0xe4, 0x14, 0x9c, - 0x91, 0xe0, 0xaa, 0x10, 0xc6, 0xa0, 0xd6, 0xdf, 0x0d, 0xc8, 0x89, 0xf2, 0x40, 0x5f, 0x40, 0x89, - 0x47, 0x78, 0x20, 0x43, 0x68, 0xc8, 0x10, 0xee, 0xeb, 0x10, 0x3e, 0x49, 0x1f, 0xc2, 0xbe, 0x60, - 0xea, 0x76, 0xac, 0xa2, 0xa4, 0xec, 0x3a, 0xe8, 0x05, 0x14, 0x59, 0x88, 0xa9, 0x20, 0xcf, 0x48, - 0xf2, 0x5f, 0x69, 0xf2, 0x4f, 0xd3, 0x93, 0x9f, 0x87, 0x98, 0x76, 0x3b, 0x56, 0x41, 0x10, 0x76, - 0x9d, 0xd6, 0x3f, 0x0c, 0x28, 0x27, 0x55, 0x22, 0x9c, 0x9e, 0xcf, 0x5e, 0x75, 0x43, 
0x55, 0x3e, - 0x9b, 0xb0, 0xdf, 0xe7, 0x66, 0xd0, 0x1f, 0xe1, 0x5d, 0x3c, 0x1a, 0x45, 0x64, 0xa4, 0x5f, 0x06, - 0xe2, 0x87, 0x41, 0x84, 0x3d, 0x97, 0x4f, 0xe4, 0x63, 0x5a, 0xdf, 0x3b, 0x48, 0xd5, 0xe1, 0xa6, - 0x54, 0xfd, 0x29, 0x93, 0xb5, 0x81, 0x97, 0xca, 0x5b, 0x5f, 0x66, 0xa0, 0xa0, 0x2a, 0x0d, 0xed, - 0xc1, 0xbd, 0xf8, 0xa5, 0x60, 0x36, 0xe3, 0x38, 0xe2, 0xf6, 0xac, 0x67, 0xef, 0x24, 0xca, 0x73, - 0xa1, 0xeb, 0x4a, 0xdb, 0x67, 0x5e, 0x1d, 0x66, 0x7b, 0x84, 0x8e, 0xf8, 0xb5, 0xf6, 0x31, 0x79, - 0x75, 0x58, 0x4f, 0x8a, 0xd1, 0x3a, 0xe4, 0x6f, 0xc5, 0xed, 0xc9, 0xf7, 0x34, 0x6b, 0xa9, 0x05, - 0xfa, 0x09, 0xac, 0x4d, 0x33, 0x2e, 0xee, 0x20, 0x39, 0xd9, 0x41, 0x1a, 0x89, 0x22, 0x7e, 0xb8, - 0x1e, 0xe8, 0xfe, 0xac, 0xcc, 0xca, 0x8b, 0x73, 0x3e, 0x5b, 0x51, 0xfd, 0x56, 0x99, 0xf3, 0x08, - 0xd6, 0x45, 0xeb, 0x64, 0x1c, 0xfb, 0x21, 0x13, 0xaf, 0xc4, 0x4b, 0xd9, 0x34, 0xe5, 0xfb, 0x96, - 0xb3, 0xd0, 0x54, 0x77, 0x41, 0xdd, 0x97, 0xa2, 0x73, 0x1e, 0xd4, 0xa0, 0x32, 0xa5, 0xb4, 0x5b, - 0x7f, 0x32, 0x20, 0xdf, 0xc3, 0x57, 0xc4, 0x13, 0xad, 0x7b, 0x49, 0x01, 0x54, 0x6e, 0x66, 0xf2, - 0x5e, 0x75, 0xf7, 0xc5, 0xe0, 0x8a, 0xee, 0x9e, 0x40, 0x1a, 0x90, 0xa5, 0x63, 0x5f, 0xc6, 0x31, - 0x6b, 0x89, 0x4f, 0xb4, 0x05, 0x6b, 0x74, 0xec, 0xdb, 0xf3, 0x69, 0x91, 0x53, 0x57, 0x46, 0xc7, - 0xfe, 0xc5, 0x6c, 0xcd, 0xfc, 0x27, 0x03, 0x45, 0x3d, 0x4a, 0x88, 0xc3, 0x7c, 0xe2, 0x07, 0xd1, - 0x44, 0x85, 0x46, 0xda, 0x93, 0xb3, 0x2a, 0x4a, 0x26, 0x23, 0x32, 0x03, 0xf1, 0x5c, 0xdf, 0xe5, - 0xd2, 0x9e, 0x04, 0xd2, 0x13, 0x22, 0xf4, 0x00, 0x2a, 0xb2, 0x7d, 0x06, 0xc3, 0x21, 0x23, 0x5c, - 0xda, 0x95, 0xb3, 0x40, 0x88, 0x4e, 0xa5, 0x44, 0xc4, 0x43, 0xac, 0x28, 0xf6, 0xc9, 0xa2, 0x79, - 0x8d, 0x58, 0x91, 0x78, 0xb7, 0x34, 0x78, 0xf9, 0x3b, 0x82, 0xf7, 0x10, 0x6a, 0xd7, 0x98, 0xd9, - 0xf1, 0xb4, 0xc0, 0xcc, 0x42, 0xd3, 0x68, 0x97, 0xac, 0xea, 0x35, 0x66, 0xf1, 0x2c, 0x31, 0x05, - 0xe9, 0x93, 0x98, 0x59, 0x9c, 0x82, 0x62, 0x19, 0x6a, 0x43, 0x43, 0x80, 0x3c, 0x97, 0x12, 0x9b, - 0x8e, 0xfd, 0x2b, 0x12, 0x89, 0x59, 0x40, 0xe0, 0xea, 0xd7, 0x98, 0xf5, 0x5c, 0x4a, 0x4e, 0x94, - 0x54, 0x5c, 0xb6, 0x40, 0xba, 0x54, 0x62, 0x87, 0x91, 0xa4, 0x2c, 0x4b, 0xe8, 0xea, 0x35, 0x66, - 0x5d, 0x29, 0x3f, 0x92, 0xe2, 0xd6, 0x7f, 0x0d, 0x28, 0xc5, 0xc3, 0x15, 0xfa, 0x60, 0x3a, 0x04, - 0xce, 0x84, 0xff, 0xb3, 0x95, 0x64, 0xa2, 0x53, 0xf9, 0x66, 0x42, 0x11, 0x3b, 0x4e, 0x44, 0x18, - 0xd3, 0x97, 0x1d, 0x2f, 0x51, 0x07, 0x72, 0x82, 0x5b, 0x0f, 0x8f, 0x69, 0x87, 0x08, 0x62, 0xc9, - 0xdd, 0xe8, 0x87, 0x50, 0x76, 0x99, 0x3d, 0x0c, 0x3c, 0x87, 0x38, 0x32, 0x0a, 0x25, 0xab, 0xe4, - 0xb2, 0x23, 0xb9, 0x4e, 0x75, 0xfb, 0x07, 0x0d, 0xa8, 0xcf, 0x39, 0x64, 0xb7, 0x5e, 0xc8, 0x7e, - 0x4c, 0xc4, 0x84, 0x91, 0x4c, 0x83, 0xb3, 0xa9, 0x9e, 0xcc, 0x75, 0xca, 0x55, 0xa4, 0x1d, 0xca, - 0xc8, 0x54, 0x56, 0xe6, 0x6d, 0x40, 0x61, 0x10, 0x78, 0x63, 0x9f, 0xea, 0x04, 0xd7, 0xab, 0xd6, - 0xdf, 0x0c, 0x28, 0xc5, 0x31, 0x15, 0x21, 0x9d, 0xcf, 0x26, 0xdd, 0x28, 0xe7, 0x32, 0xe9, 0x11, - 0xac, 0xb3, 0x09, 0xe3, 0xc4, 0xb7, 0xe7, 0xb1, 0xaa, 0xa4, 0x90, 0xd2, 0x9d, 0x2c, 0xe4, 0xde, - 0xff, 0x27, 0x6a, 0xf6, 0x8e, 0x44, 0x15, 0xff, 0xa2, 0xc8, 0x86, 0x26, 0x5d, 0xc8, 0xa9, 0x21, - 0x4b, 0x4a, 0xc4, 0x15, 0x6c, 0x7d, 0x69, 0xc0, 0xc6, 0xf2, 0xbe, 0x89, 0x3e, 0x84, 0x87, 0xfb, - 0xcf, 0x9e, 0x59, 0x87, 0xcf, 0xf6, 0xfb, 0xdd, 0xd3, 0x13, 0xbb, 0x7f, 0x78, 0x7c, 0x76, 0x6a, - 0xed, 0xf7, 0xba, 0xfd, 0x17, 0xf6, 0xc5, 0xc9, 0xf9, 0xd9, 0xe1, 0xd3, 0xee, 0x51, 0xf7, 0xb0, - 0xd3, 0x58, 0x41, 0xef, 0xc3, 0xfd, 0xbb, 0x80, 0x9d, 0xc3, 0x5e, 0x7f, 0xbf, 0x61, 0xa0, 0x1f, - 0x43, 0xeb, 
0x2e, 0xc8, 0xd3, 0x8b, 0xe3, 0x8b, 0xde, 0x7e, 0xbf, 0x7b, 0x79, 0xd8, 0xc8, 0x1c, - 0x7c, 0x6d, 0x7c, 0xf5, 0x7a, 0xd3, 0x78, 0xf5, 0x7a, 0xd3, 0xf8, 0xd7, 0xeb, 0x4d, 0xe3, 0xcf, - 0x6f, 0x36, 0x57, 0x5e, 0xbd, 0xd9, 0x5c, 0xf9, 0xfa, 0xcd, 0xe6, 0x0a, 0x7c, 0xec, 0x06, 0x29, - 0x52, 0xe9, 0xa0, 0x16, 0xff, 0x1b, 0x79, 0x26, 0x50, 0x67, 0xc6, 0x6f, 0x7f, 0x9d, 0xfa, 0x15, - 0x54, 0xbf, 0x0c, 0x8c, 0x08, 0xbd, 0xe3, 0x57, 0x8c, 0xbf, 0x66, 0xb6, 0x4e, 0x43, 0x42, 0xfb, - 0x09, 0xa1, 0x3c, 0x2a, 0x9e, 0xad, 0xd8, 0xf6, 0xe5, 0x6e, 0x67, 0x0a, 0xbe, 0x2a, 0x48, 0xb6, - 0xc7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x31, 0xf3, 0xe8, 0x27, 0x11, 0x00, 0x00, + // 1591 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0xdb, 0x6f, 0x13, 0x57, + 0x1a, 0xcf, 0xd8, 0x8e, 0x2f, 0x9f, 0x2f, 0x71, 0x0e, 0x21, 0xcc, 0xb2, 0x4b, 0x62, 0x8c, 0x58, + 0x4c, 0x56, 0x24, 0x24, 0xb0, 0x2b, 0xd0, 0xae, 0xb4, 0x9b, 0xc4, 0x09, 0x18, 0x42, 0x92, 0x9d, + 0x38, 0x51, 0x69, 0x91, 0xa6, 0x27, 0x9e, 0x63, 0x67, 0xca, 0xdc, 0x34, 0xe7, 0x38, 0xc2, 0xea, + 0xbf, 0xc0, 0x43, 0xff, 0x8e, 0x4a, 0xfd, 0x1b, 0xfa, 0xca, 0x23, 0xea, 0x13, 0xea, 0x03, 0xaa, + 0xe0, 0x85, 0x56, 0xea, 0x6b, 0x9f, 0xab, 0x73, 0x99, 0xb1, 0x9d, 0x3a, 0x82, 0xe9, 0x4b, 0x34, + 0xe7, 0x3b, 0xbf, 0xf3, 0x3b, 0xdf, 0xfd, 0x3b, 0x0e, 0xdc, 0xf7, 0x03, 0xe2, 0x31, 0xe2, 0x10, + 0x97, 0xb0, 0x70, 0xb0, 0x12, 0x84, 0x3e, 0xf3, 0xf9, 0xdf, 0xae, 0xed, 0x10, 0xba, 0x72, 0xba, + 0x6a, 0x91, 0x53, 0xe2, 0xf8, 0x81, 0x4b, 0x3c, 0x16, 0x8b, 0x97, 0x05, 0x0a, 0x2d, 0x8d, 0x1d, + 0x95, 0xc2, 0xe5, 0x18, 0x33, 0x76, 0xf4, 0xf2, 0x5c, 0xcf, 0xef, 0xf9, 0x92, 0x9c, 0x7f, 0x49, + 0xf0, 0xe5, 0xa5, 0x49, 0x97, 0x77, 0x7c, 0xd7, 0xf5, 0xbd, 0x95, 0xd3, 0x55, 0xf5, 0xa5, 0xb0, + 0xcb, 0x93, 0xb0, 0x21, 0xa1, 0x7e, 0x3f, 0xec, 0x10, 0x8e, 0x8e, 0xbe, 0x25, 0xbe, 0x3e, 0x80, + 0xd2, 0xbe, 0xd2, 0xa5, 0x89, 0x19, 0x46, 0x36, 0xcc, 0x46, 0x08, 0x33, 0x52, 0x52, 0xd7, 0x6a, + 0xe9, 0x46, 0x71, 0xed, 0x3f, 0xcb, 0x9f, 0x6e, 0xc9, 0xb2, 0xa1, 0x48, 0x22, 0x72, 0xa3, 0x1a, + 0x9e, 0x91, 0xd4, 0x3f, 0x68, 0x50, 0x3d, 0x0b, 0x43, 0x8f, 0x21, 0x1f, 0x01, 0x75, 0xad, 0xa6, + 0x35, 0x8a, 0x6b, 0x37, 0x27, 0x5e, 0x1b, 0x9b, 0x71, 0xba, 0x1a, 0xdf, 0xb5, 0x91, 0x79, 0xf5, + 0x76, 0x71, 0xca, 0x88, 0x09, 0xd0, 0x97, 0x50, 0xa1, 0x1d, 0x3f, 0x18, 0xb1, 0x24, 0x25, 0x2c, + 0xb9, 0x9f, 0xc4, 0x92, 0x03, 0xce, 0x10, 0x9b, 0x51, 0xa6, 0xa3, 0x4b, 0x74, 0x05, 0x80, 0x76, + 0x4e, 0x88, 0x8b, 0xcd, 0x7e, 0xe8, 0xe8, 0xe9, 0x9a, 0xd6, 0x28, 0x18, 0x05, 0x29, 0x39, 0x0c, + 0x9d, 0x47, 0xd9, 0xfc, 0x87, 0x5c, 0xf5, 0xe7, 0x5c, 0xfd, 0xb5, 0x06, 0xe5, 0x31, 0x1e, 0xb4, + 0x07, 0xd3, 0x82, 0x49, 0x19, 0x79, 0x67, 0xa2, 0x46, 0x2a, 0xb2, 0xa7, 0xab, 0xcb, 0x2d, 0x8f, + 0xb2, 0xb0, 0xcf, 0xf5, 0xc1, 0xcc, 0xf6, 0x3d, 0xc1, 0xa5, 0xcc, 0x95, 0x3c, 0x68, 0x0f, 0xf2, + 0x67, 0xac, 0xbc, 0x93, 0xc4, 0x4a, 0xa5, 0x98, 0x11, 0x93, 0x7c, 0xc4, 0xb4, 0xfa, 0x6f, 0x00, + 0x39, 0x75, 0x08, 0x1d, 0x41, 0x91, 0x62, 0x37, 0x70, 0x88, 0xc9, 0x06, 0xc2, 0x24, 0x7e, 0xfd, + 0x3f, 0x93, 0x5c, 0x7f, 0x84, 0x9d, 0x3e, 0x69, 0x0f, 0x02, 0x62, 0x80, 0x64, 0xe2, 0xdf, 0xe8, + 0x11, 0x64, 0xe5, 0x4a, 0x59, 0xb4, 0x96, 0x28, 0x6e, 0xe2, 0xa4, 0xa1, 0x18, 0xd0, 0x67, 0x50, + 0x76, 0x71, 0x10, 0xd8, 0x5e, 0xcf, 0x64, 0xf8, 0xd8, 0x21, 0x7a, 0x3a, 0xb9, 0x93, 0x9e, 0x48, + 0x02, 0xa3, 0xa4, 0x98, 0xda, 0x9c, 0x08, 0x7d, 0x01, 0x15, 0xc7, 0xef, 0x88, 0xb8, 0x28, 0xea, + 0x8c, 0xa0, 0xbe, 0x9b, 0x84, 0x7a, 0x47, 0x31, 0x18, 0xe5, 0x88, 0x4b, 0x92, 0xdf, 0x84, 
0x6a, + 0x4c, 0x6e, 0x7b, 0x96, 0xdd, 0x21, 0x54, 0x9f, 0xae, 0xa5, 0x1b, 0xd3, 0xc6, 0x4c, 0x24, 0x6f, + 0x49, 0x31, 0xd7, 0xa3, 0xdb, 0xf7, 0x3a, 0x23, 0x7a, 0x64, 0x93, 0xeb, 0xb1, 0xad, 0x18, 0x8c, + 0x72, 0xc4, 0x25, 0xf5, 0x38, 0x82, 0x19, 0xcc, 0x58, 0x68, 0x1f, 0xf7, 0x19, 0x51, 0xec, 0x39, + 0xc1, 0x7e, 0xe3, 0x23, 0x99, 0xfb, 0x98, 0x0c, 0x44, 0x70, 0x55, 0xb6, 0x56, 0x62, 0x16, 0xc9, + 0x7b, 0x3c, 0xca, 0xdb, 0xf7, 0x6c, 0x46, 0xf5, 0x7c, 0xf2, 0x1a, 0x5d, 0x8f, 0x28, 0x0e, 0x3d, + 0x9b, 0x8d, 0xdc, 0xc1, 0x97, 0xbc, 0xd6, 0xc0, 0xb1, 0xbd, 0xe7, 0x4a, 0xed, 0x82, 0xa0, 0xbf, + 0x9d, 0x28, 0x38, 0xb6, 0xf7, 0xdc, 0x28, 0x70, 0x0e, 0xa9, 0xf4, 0x55, 0x28, 0x51, 0x16, 0x0e, + 0x53, 0x09, 0x6a, 0xe9, 0x46, 0xc1, 0x28, 0x4a, 0x99, 0x84, 0x5c, 0x01, 0x60, 0xb6, 0x4b, 0x4c, + 0x0f, 0x7b, 0x3e, 0xd5, 0x8b, 0x35, 0xad, 0x91, 0x36, 0x0a, 0x5c, 0xb2, 0xcb, 0x05, 0xe8, 0x3a, + 0x54, 0xac, 0x7e, 0x28, 0xc3, 0x2a, 0x21, 0x25, 0x01, 0x29, 0x47, 0x52, 0x09, 0x7b, 0x06, 0xc5, + 0x80, 0x84, 0xb6, 0x6f, 0xc9, 0xc2, 0x2a, 0x8b, 0x5e, 0xf1, 0xe7, 0x0a, 0x4b, 0xf9, 0x1f, 0x24, + 0x9f, 0x28, 0xaf, 0x79, 0xc8, 0xca, 0x95, 0x5e, 0x11, 0x97, 0xab, 0x15, 0xba, 0x05, 0x88, 0xc7, + 0x8f, 0x78, 0xcc, 0x14, 0x26, 0xc9, 0xac, 0x9b, 0x11, 0x59, 0x37, 0xab, 0x76, 0x0e, 0xe2, 0x0d, + 0xf4, 0x5f, 0xf8, 0x9b, 0x45, 0xba, 0xb8, 0xef, 0x30, 0x73, 0xa4, 0x0b, 0xa8, 0xa3, 0xe4, 0x85, + 0x5e, 0xad, 0x69, 0x8d, 0x69, 0xe3, 0x2f, 0x0a, 0x73, 0x10, 0x97, 0xf7, 0x81, 0x02, 0xa0, 0x63, + 0x00, 0xa5, 0xbd, 0x69, 0x5b, 0xfa, 0x6c, 0x4d, 0x6b, 0x94, 0x36, 0x36, 0xb9, 0xb6, 0x3f, 0xbe, + 0x5d, 0xfc, 0x77, 0xcf, 0x3f, 0x63, 0xae, 0xcd, 0x67, 0x9f, 0xe3, 0x90, 0x0e, 0xf3, 0xc3, 0x95, + 0xc0, 0xc2, 0x0c, 0xaf, 0xd8, 0x1e, 0x23, 0xa1, 0x87, 0x9d, 0x15, 0xbe, 0x8a, 0x5a, 0x59, 0xab, + 0x69, 0x14, 0x14, 0x6d, 0xcb, 0x42, 0xf7, 0x40, 0xb7, 0x42, 0x3f, 0x08, 0x88, 0x65, 0xc6, 0xd9, + 0x41, 0xcd, 0x8e, 0xdf, 0xf7, 0x98, 0x7e, 0xa1, 0xa6, 0x35, 0xca, 0xc6, 0xbc, 0xda, 0x8f, 0x73, + 0x89, 0x6e, 0xf2, 0x5d, 0xf4, 0x2f, 0xb8, 0xe4, 0x87, 0x76, 0xcf, 0xf6, 0xb0, 0x63, 0x06, 0x78, + 0xe0, 0xf8, 0xd8, 0x32, 0xbb, 0x7e, 0xe8, 0x62, 0xa6, 0xcf, 0x89, 0xa6, 0x78, 0x31, 0xda, 0xde, + 0x97, 0xbb, 0xdb, 0x62, 0x93, 0x57, 0xee, 0xd9, 0x73, 0xfa, 0x45, 0x6e, 0x9b, 0x31, 0x73, 0xe6, + 0x00, 0xfa, 0x07, 0xcc, 0x0e, 0x8b, 0x20, 0xf2, 0xf7, 0xbc, 0xf0, 0x77, 0x35, 0xde, 0x50, 0x65, + 0x5e, 0xff, 0x0a, 0xca, 0x63, 0xe9, 0x8e, 0xee, 0xc2, 0xfc, 0xf0, 0xf4, 0x73, 0x32, 0x18, 0x7a, + 0x5e, 0x13, 0x9e, 0x9f, 0x8b, 0x77, 0x1f, 0x93, 0x41, 0xec, 0xf4, 0x6b, 0x50, 0xe6, 0xe5, 0x36, + 0x04, 0xa7, 0x04, 0xb8, 0xc4, 0x85, 0x11, 0xa8, 0xfe, 0xbd, 0x06, 0x19, 0x9e, 0xfc, 0xe8, 0x19, + 0xe4, 0x59, 0x88, 0x3b, 0x22, 0x40, 0x9a, 0x08, 0xd0, 0xba, 0x0a, 0xd0, 0xfd, 0xe4, 0x01, 0x6a, + 0x73, 0xa6, 0x56, 0xd3, 0xc8, 0x09, 0xca, 0x96, 0x85, 0x9e, 0x42, 0x8e, 0x06, 0xd8, 0xe3, 0xe4, + 0x29, 0x41, 0xfe, 0x3f, 0x45, 0x7e, 0x2f, 0x39, 0xf9, 0x41, 0x80, 0xbd, 0x56, 0xd3, 0xc8, 0x72, + 0xc2, 0x96, 0x55, 0xff, 0x41, 0x83, 0x42, 0x5c, 0x03, 0xdc, 0xe8, 0xf1, 0xdc, 0x94, 0x1e, 0x2a, + 0xb1, 0xd1, 0x74, 0xfc, 0x14, 0xcf, 0xa0, 0xaf, 0xe1, 0x12, 0xee, 0xf5, 0x42, 0xd2, 0x53, 0x7d, + 0x9f, 0xb8, 0x81, 0x1f, 0x62, 0xc7, 0x66, 0x03, 0x31, 0x2a, 0x2b, 0x6b, 0x1b, 0x89, 0xfa, 0xd7, + 0x90, 0xaa, 0x3d, 0x64, 0x32, 0xe6, 0xf1, 0x44, 0x79, 0xfd, 0x65, 0x0a, 0xb2, 0xb2, 0x8e, 0xd0, + 0x1a, 0x5c, 0x8c, 0xe6, 0x00, 0x35, 0x29, 0xc3, 0x21, 0x33, 0x47, 0x2d, 0xbb, 0x10, 0x6f, 0x1e, + 0xf0, 0xbd, 0x96, 0xd0, 0x7d, 0x64, 0xa6, 0x50, 0xd3, 0x21, 0x5e, 0x8f, 0x9d, 0x28, 0x1b, 0xe3, + 0x99, 0x42, 0x77, 
0x84, 0x18, 0xcd, 0xc1, 0xf4, 0x29, 0xf7, 0x9e, 0x98, 0x96, 0x69, 0x43, 0x2e, + 0x26, 0xe7, 0x6b, 0x66, 0x72, 0xbe, 0xa2, 0x45, 0xd5, 0x7d, 0xa5, 0x5a, 0xd3, 0xfc, 0x9e, 0x87, + 0x53, 0xb2, 0x9b, 0x4a, 0x75, 0x6e, 0xc3, 0x1c, 0x6f, 0x8c, 0x94, 0x61, 0x37, 0xa0, 0x7c, 0x06, + 0xbc, 0x10, 0x2d, 0x51, 0x4c, 0xaf, 0x8c, 0x81, 0x86, 0x7b, 0x87, 0x9e, 0xfd, 0x82, 0xf7, 0xc5, + 0x8d, 0x32, 0x14, 0x87, 0x94, 0x66, 0xfd, 0x97, 0x14, 0xe4, 0xd4, 0x68, 0xe6, 0xad, 0xd9, 0x25, + 0xae, 0x1f, 0x0e, 0xa4, 0x33, 0x84, 0x1b, 0x32, 0x46, 0x51, 0xca, 0x84, 0x0f, 0x46, 0x20, 0x8e, + 0xed, 0xda, 0x4c, 0x98, 0x1e, 0x43, 0x76, 0xb8, 0x08, 0x2d, 0x42, 0x51, 0xb4, 0x23, 0xbf, 0xdb, + 0xa5, 0x84, 0x89, 0x88, 0x66, 0x0c, 0xe0, 0xa2, 0x3d, 0x21, 0xe1, 0x1e, 0xe0, 0x2b, 0x0f, 0xbb, + 0x23, 0xc9, 0x94, 0x11, 0x3e, 0xac, 0x46, 0x1b, 0x71, 0xae, 0x4c, 0x74, 0xd7, 0xf4, 0x39, 0xee, + 0xba, 0x06, 0xe5, 0x13, 0x4c, 0xcd, 0x68, 0xfa, 0x52, 0x3d, 0x5b, 0xd3, 0x1a, 0x79, 0xa3, 0x74, + 0x82, 0x69, 0x34, 0x9b, 0x87, 0x20, 0x75, 0x13, 0xd5, 0x73, 0x43, 0x50, 0x24, 0x43, 0x0d, 0xa8, + 0x72, 0x90, 0x63, 0x7b, 0xc4, 0xf4, 0xfa, 0xee, 0x31, 0x09, 0xf9, 0x6c, 0xe5, 0xb8, 0xca, 0x09, + 0xa6, 0x3b, 0xb6, 0x47, 0x76, 0xa5, 0x14, 0x2d, 0xc1, 0x2c, 0x47, 0xda, 0x9e, 0xc0, 0x76, 0x43, + 0x41, 0x59, 0x10, 0xd0, 0x99, 0x13, 0x4c, 0x5b, 0x42, 0xbe, 0x2d, 0xc4, 0xf5, 0x5f, 0x35, 0xc8, + 0x47, 0x8f, 0x15, 0x74, 0x7d, 0xf8, 0xa8, 0x1a, 0xc9, 0xba, 0x87, 0x53, 0xf1, 0x0b, 0x49, 0x46, + 0x58, 0x87, 0x1c, 0xb6, 0xac, 0x90, 0x50, 0xaa, 0x9c, 0x1d, 0x2d, 0x51, 0x13, 0x32, 0x9c, 0x5b, + 0x3d, 0xc6, 0x92, 0x0e, 0x65, 0x62, 0x88, 0xd3, 0xe8, 0xaf, 0x50, 0xb0, 0xa9, 0xd9, 0xf5, 0x1d, + 0x8b, 0x58, 0x22, 0x0a, 0x79, 0x23, 0x6f, 0xd3, 0x6d, 0xb1, 0x4e, 0xe4, 0xfd, 0x8d, 0x2a, 0x54, + 0xc6, 0x0c, 0x32, 0xeb, 0x4f, 0x45, 0x07, 0x24, 0x7c, 0x62, 0xc7, 0xaf, 0xab, 0xd1, 0x0a, 0x8b, + 0xdf, 0x49, 0xd2, 0x54, 0xa4, 0x0c, 0x4a, 0x89, 0x89, 0x2a, 0xd5, 0x9b, 0x87, 0x6c, 0xc7, 0x77, + 0xfa, 0xae, 0x27, 0x12, 0x29, 0x6d, 0xa8, 0x55, 0xfd, 0x3b, 0x0d, 0xf2, 0x51, 0x4c, 0x79, 0x48, + 0xc7, 0xb3, 0x49, 0xb5, 0xa6, 0xb1, 0x4c, 0xba, 0x0d, 0x73, 0x74, 0x40, 0x19, 0x71, 0xcd, 0x71, + 0xac, 0xac, 0x5e, 0x24, 0xf7, 0x76, 0xcf, 0xe4, 0xde, 0x1f, 0x13, 0x35, 0x7d, 0x4e, 0xa2, 0xf2, + 0x27, 0xbf, 0x68, 0x21, 0xc2, 0x84, 0x8c, 0x7c, 0xb4, 0x08, 0x09, 0x77, 0xc1, 0xd2, 0x4b, 0x0d, + 0xe6, 0x27, 0x77, 0x2a, 0x74, 0x03, 0xae, 0xad, 0x3f, 0x78, 0x60, 0x6c, 0x3d, 0x58, 0x6f, 0xb7, + 0xf6, 0x76, 0xcd, 0xf6, 0xd6, 0x93, 0xfd, 0x3d, 0x63, 0x7d, 0xa7, 0xd5, 0x7e, 0x6a, 0x1e, 0xee, + 0x1e, 0xec, 0x6f, 0x6d, 0xb6, 0xb6, 0x5b, 0x5b, 0xcd, 0xea, 0x14, 0xba, 0x0a, 0x57, 0xce, 0x03, + 0x36, 0xb7, 0x76, 0xda, 0xeb, 0x55, 0x0d, 0xfd, 0x1d, 0xea, 0xe7, 0x41, 0x36, 0x0f, 0x9f, 0x1c, + 0xee, 0xac, 0xb7, 0x5b, 0x47, 0x5b, 0xd5, 0xd4, 0xc6, 0x1b, 0xed, 0xd5, 0xbb, 0x05, 0xed, 0xf5, + 0xbb, 0x05, 0xed, 0xa7, 0x77, 0x0b, 0xda, 0x37, 0xef, 0x17, 0xa6, 0x5e, 0xbf, 0x5f, 0x98, 0x7a, + 0xf3, 0x7e, 0x61, 0x0a, 0x6e, 0xd9, 0x7e, 0x82, 0x54, 0xda, 0x28, 0x47, 0x3f, 0xcb, 0xf6, 0x39, + 0x6a, 0x5f, 0xfb, 0xfc, 0xff, 0x89, 0xe7, 0x8e, 0xfc, 0xa5, 0xdd, 0x23, 0xde, 0x39, 0xff, 0x15, + 0xf8, 0x36, 0xb5, 0xb4, 0x17, 0x10, 0xaf, 0x1d, 0x13, 0x8a, 0xab, 0xa2, 0xb7, 0x0a, 0x5d, 0x3e, + 0x5a, 0x6d, 0x0e, 0xc1, 0xc7, 0x59, 0xc1, 0x76, 0xe7, 0xf7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, + 0x20, 0xd9, 0xad, 0x77, 0x10, 0x00, 0x00, } func (m *ProfilesData) Marshal() (dAtA []byte, err error) { @@ -1620,6 +1541,27 @@ func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if 
len(m.AttributeIndices) > 0 { + dAtA4 := make([]byte, len(m.AttributeIndices)*10) + var j3 int + for _, num1 := range m.AttributeIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA4[j3] = uint8(num) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA4[:j3]) + i = encodeVarintProfiles(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } if len(m.OriginalPayload) > 0 { i -= len(m.OriginalPayload) copy(dAtA[i:], m.OriginalPayload) @@ -1645,22 +1587,6 @@ func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x98 } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - } { size := m.ProfileId.Size() i -= size @@ -1681,21 +1607,21 @@ func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x80 } if len(m.CommentStrindices) > 0 { - dAtA4 := make([]byte, len(m.CommentStrindices)*10) - var j3 int + dAtA6 := make([]byte, len(m.CommentStrindices)*10) + var j5 int for _, num1 := range m.CommentStrindices { num := uint64(num1) for num >= 1<<7 { - dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) + dAtA6[j5] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j3++ + j5++ } - dAtA4[j3] = uint8(num) - j3++ + dAtA6[j5] = uint8(num) + j5++ } - i -= j3 - copy(dAtA[i:], dAtA4[:j3]) - i = encodeVarintProfiles(dAtA, i, uint64(j3)) + i -= j5 + copy(dAtA[i:], dAtA6[:j5]) + i = encodeVarintProfiles(dAtA, i, uint64(j5)) i-- dAtA[i] = 0x7a } @@ -1790,21 +1716,21 @@ func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } if len(m.LocationIndices) > 0 { - dAtA7 := make([]byte, len(m.LocationIndices)*10) - var j6 int + dAtA9 := make([]byte, len(m.LocationIndices)*10) + var j8 int for _, num1 := range m.LocationIndices { num := uint64(num1) for num >= 1<<7 { - dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) + dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j6++ + j8++ } - dAtA7[j6] = uint8(num) - j6++ + dAtA9[j8] = uint8(num) + j8++ } - i -= j6 - copy(dAtA[i:], dAtA7[:j6]) - i = encodeVarintProfiles(dAtA, i, uint64(j6)) + i -= j8 + copy(dAtA[i:], dAtA9[:j8]) + i = encodeVarintProfiles(dAtA, i, uint64(j8)) i-- dAtA[i] = 0x2a } @@ -2002,20 +1928,20 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.TimestampsUnixNano) > 0 { - dAtA9 := make([]byte, len(m.TimestampsUnixNano)*10) - var j8 int + dAtA11 := make([]byte, len(m.TimestampsUnixNano)*10) + var j10 int for _, num := range m.TimestampsUnixNano { for num >= 1<<7 { - dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) + dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j8++ + j10++ } - dAtA9[j8] = uint8(num) - j8++ + dAtA11[j10] = uint8(num) + j10++ } - i -= j8 - copy(dAtA[i:], dAtA9[:j8]) - i = encodeVarintProfiles(dAtA, i, uint64(j8)) + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintProfiles(dAtA, i, uint64(j10)) i-- dAtA[i] = 0x32 } @@ -2029,40 +1955,40 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } if len(m.AttributeIndices) > 0 { - dAtA11 := make([]byte, len(m.AttributeIndices)*10) - var j10 int + dAtA13 := make([]byte, len(m.AttributeIndices)*10) + var j12 int for _, num1 := range m.AttributeIndices { num := uint64(num1) for num >= 1<<7 { - dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) + dAtA13[j12] = 
uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j10++ + j12++ } - dAtA11[j10] = uint8(num) - j10++ + dAtA13[j12] = uint8(num) + j12++ } - i -= j10 - copy(dAtA[i:], dAtA11[:j10]) - i = encodeVarintProfiles(dAtA, i, uint64(j10)) + i -= j12 + copy(dAtA[i:], dAtA13[:j12]) + i = encodeVarintProfiles(dAtA, i, uint64(j12)) i-- dAtA[i] = 0x22 } if len(m.Value) > 0 { - dAtA13 := make([]byte, len(m.Value)*10) - var j12 int + dAtA15 := make([]byte, len(m.Value)*10) + var j14 int for _, num1 := range m.Value { num := uint64(num1) for num >= 1<<7 { - dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80) + dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j12++ + j14++ } - dAtA13[j12] = uint8(num) - j12++ + dAtA15[j14] = uint8(num) + j14++ } - i -= j12 - copy(dAtA[i:], dAtA13[:j12]) - i = encodeVarintProfiles(dAtA, i, uint64(j12)) + i -= j14 + copy(dAtA[i:], dAtA15[:j14]) + i = encodeVarintProfiles(dAtA, i, uint64(j14)) i-- dAtA[i] = 0x1a } @@ -2091,49 +2017,6 @@ func (m *Sample_LinkIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x28 return len(dAtA) - i, nil } -func (m *Label) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Label) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.NumUnitStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.NumUnitStrindex)) - i-- - dAtA[i] = 0x20 - } - if m.Num != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.Num)) - i-- - dAtA[i] = 0x18 - } - if m.StrStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.StrStrindex)) - i-- - dAtA[i] = 0x10 - } - if m.KeyStrindex != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.KeyStrindex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *Mapping) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2195,21 +2078,21 @@ func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x30 } if len(m.AttributeIndices) > 0 { - dAtA15 := make([]byte, len(m.AttributeIndices)*10) - var j14 int + dAtA17 := make([]byte, len(m.AttributeIndices)*10) + var j16 int for _, num1 := range m.AttributeIndices { num := uint64(num1) for num >= 1<<7 { - dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) + dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j14++ + j16++ } - dAtA15[j14] = uint8(num) - j14++ + dAtA17[j16] = uint8(num) + j16++ } - i -= j14 - copy(dAtA[i:], dAtA15[:j14]) - i = encodeVarintProfiles(dAtA, i, uint64(j14)) + i -= j16 + copy(dAtA[i:], dAtA17[:j16]) + i = encodeVarintProfiles(dAtA, i, uint64(j16)) i-- dAtA[i] = 0x2a } @@ -2257,21 +2140,21 @@ func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.AttributeIndices) > 0 { - dAtA17 := make([]byte, len(m.AttributeIndices)*10) - var j16 int + dAtA19 := make([]byte, len(m.AttributeIndices)*10) + var j18 int for _, num1 := range m.AttributeIndices { num := uint64(num1) for num >= 1<<7 { - dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) + dAtA19[j18] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j16++ + j18++ } - dAtA17[j16] = uint8(num) - j16++ + dAtA19[j18] = uint8(num) + j18++ } - i -= j16 - copy(dAtA[i:], dAtA17[:j16]) - i = encodeVarintProfiles(dAtA, i, uint64(j16)) + i -= j18 + copy(dAtA[i:], dAtA19[:j18]) + i = 
encodeVarintProfiles(dAtA, i, uint64(j18)) i-- dAtA[i] = 0x2a } @@ -2567,12 +2450,6 @@ func (m *Profile) Size() (n int) { } l = m.ProfileId.Size() n += 2 + l + sovProfiles(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 2 + l + sovProfiles(uint64(l)) - } - } if m.DroppedAttributesCount != 0 { n += 2 + sovProfiles(uint64(m.DroppedAttributesCount)) } @@ -2584,6 +2461,13 @@ func (m *Profile) Size() (n int) { if l > 0 { n += 2 + l + sovProfiles(uint64(l)) } + if len(m.AttributeIndices) > 0 { + l = 0 + for _, e := range m.AttributeIndices { + l += sovProfiles(uint64(e)) + } + n += 2 + sovProfiles(uint64(l)) + l + } return n } @@ -2681,27 +2565,6 @@ func (m *Sample_LinkIndex) Size() (n int) { n += 1 + sovProfiles(uint64(m.LinkIndex)) return n } -func (m *Label) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.KeyStrindex != 0 { - n += 1 + sovProfiles(uint64(m.KeyStrindex)) - } - if m.StrStrindex != 0 { - n += 1 + sovProfiles(uint64(m.StrStrindex)) - } - if m.Num != 0 { - n += 1 + sovProfiles(uint64(m.Num)) - } - if m.NumUnitStrindex != 0 { - n += 1 + sovProfiles(uint64(m.NumUnitStrindex)) - } - return n -} - func (m *Mapping) Size() (n int) { if m == nil { return 0 @@ -3836,40 +3699,6 @@ func (m *Profile) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 19: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) @@ -3955,6 +3784,82 @@ func (m *Profile) Unmarshal(dAtA []byte) error { m.OriginalPayload = []byte{} } iNdEx = postIndex + case 22: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.AttributeIndices) == 0 { + m.AttributeIndices = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) + } default: iNdEx = preIndex skippy, err := skipProfiles(dAtA[iNdEx:]) @@ -4623,132 +4528,6 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } return nil } -func (m *Label) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Label: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType) - } - m.KeyStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeyStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StrStrindex", wireType) - } - m.StrStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StrStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Num", wireType) - } - m.Num = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Num |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumUnitStrindex", wireType) - } - m.NumUnitStrindex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumUnitStrindex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Mapping) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go index c1fcf0764e0..b0bddfb985f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go @@ -235,7 +235,8 @@ type ResourceSpans struct { // A 
list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. @@ -312,7 +313,8 @@ type ScopeSpans struct { // A list of Spans that originate from an instrumentation scope. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all spans and span events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go new file mode 100644 index 00000000000..99e6ac83653 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +// MarshalSizer is the interface that groups the basic Marshal and Size methods +type MarshalSizer interface { + Marshaler + Sizer +} + +// Marshaler marshals pdata.Logs into bytes. +type Marshaler interface { + // MarshalLogs the given pdata.Logs into bytes. + // If the error is not nil, the returned bytes slice cannot be used. + MarshalLogs(ld Logs) ([]byte, error) +} + +// Unmarshaler unmarshalls bytes into pdata.Logs. +type Unmarshaler interface { + // UnmarshalLogs the given bytes into pdata.Logs. + // If the error is not nil, the returned pdata.Logs cannot be used. + UnmarshalLogs(buf []byte) (Logs, error) +} + +// Sizer is an optional interface implemented by the Marshaler, +// that calculates the size of a marshaled Logs. +type Sizer interface { + // LogsSize returns the size in bytes of a marshaled Logs. + LogsSize(ld Logs) int +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go new file mode 100644 index 00000000000..ee79ddec3cd --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
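+// A minimal round-trip sketch for the MarshalSizer and Unmarshaler interfaces
+// introduced in encoding.go above. This is an illustration only; it assumes
+// the package's existing ProtoMarshaler and ProtoUnmarshaler types satisfy
+// those interfaces:
+//
+//	var m plog.MarshalSizer = &plog.ProtoMarshaler{}
+//	buf, err := m.MarshalLogs(ld) // ld is a plog.Logs
+//	if err == nil && len(buf) == m.LogsSize(ld) {
+//		var u plog.Unmarshaler = &plog.ProtoUnmarshaler{}
+//		ld2, _ := u.UnmarshalLogs(buf) // ld2 mirrors ld on success
+//		_ = ld2
+//	}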
+ +package plog + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// LogRecord are experimental implementation of OpenTelemetry Log Data Model. + +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLogRecord function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LogRecord struct { + orig *otlplogs.LogRecord + state *internal.State +} + +func newLogRecord(orig *otlplogs.LogRecord, state *internal.State) LogRecord { + return LogRecord{orig: orig, state: state} +} + +// NewLogRecord creates a new empty LogRecord. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewLogRecord() LogRecord { + state := internal.StateMutable + return newLogRecord(&otlplogs.LogRecord{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms LogRecord) MoveTo(dest LogRecord) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlplogs.LogRecord{} +} + +// ObservedTimestamp returns the observedtimestamp associated with this LogRecord. +func (ms LogRecord) ObservedTimestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.ObservedTimeUnixNano) +} + +// SetObservedTimestamp replaces the observedtimestamp associated with this LogRecord. +func (ms LogRecord) SetObservedTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.ObservedTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this LogRecord. +func (ms LogRecord) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this LogRecord. +func (ms LogRecord) SetTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeUnixNano = uint64(v) +} + +// TraceID returns the traceid associated with this LogRecord. +func (ms LogRecord) TraceID() pcommon.TraceID { + return pcommon.TraceID(ms.orig.TraceId) +} + +// SetTraceID replaces the traceid associated with this LogRecord. +func (ms LogRecord) SetTraceID(v pcommon.TraceID) { + ms.state.AssertMutable() + ms.orig.TraceId = data.TraceID(v) +} + +// SpanID returns the spanid associated with this LogRecord. +func (ms LogRecord) SpanID() pcommon.SpanID { + return pcommon.SpanID(ms.orig.SpanId) +} + +// SetSpanID replaces the spanid associated with this LogRecord. +func (ms LogRecord) SetSpanID(v pcommon.SpanID) { + ms.state.AssertMutable() + ms.orig.SpanId = data.SpanID(v) +} + +// Flags returns the flags associated with this LogRecord. +func (ms LogRecord) Flags() LogRecordFlags { + return LogRecordFlags(ms.orig.Flags) +} + +// SetFlags replaces the flags associated with this LogRecord. +func (ms LogRecord) SetFlags(v LogRecordFlags) { + ms.state.AssertMutable() + ms.orig.Flags = uint32(v) +} + +// EventName returns the eventname associated with this LogRecord. +func (ms LogRecord) EventName() string { + return ms.orig.EventName +} + +// SetEventName replaces the eventname associated with this LogRecord. 
+func (ms LogRecord) SetEventName(v string) { + ms.state.AssertMutable() + ms.orig.EventName = v +} + +// SeverityText returns the severitytext associated with this LogRecord. +func (ms LogRecord) SeverityText() string { + return ms.orig.SeverityText +} + +// SetSeverityText replaces the severitytext associated with this LogRecord. +func (ms LogRecord) SetSeverityText(v string) { + ms.state.AssertMutable() + ms.orig.SeverityText = v +} + +// SeverityNumber returns the severitynumber associated with this LogRecord. +func (ms LogRecord) SeverityNumber() SeverityNumber { + return SeverityNumber(ms.orig.SeverityNumber) +} + +// SetSeverityNumber replaces the severitynumber associated with this LogRecord. +func (ms LogRecord) SetSeverityNumber(v SeverityNumber) { + ms.state.AssertMutable() + ms.orig.SeverityNumber = otlplogs.SeverityNumber(v) +} + +// Body returns the body associated with this LogRecord. +func (ms LogRecord) Body() pcommon.Value { + return pcommon.Value(internal.NewValue(&ms.orig.Body, ms.state)) +} + +// Attributes returns the Attributes associated with this LogRecord. +func (ms LogRecord) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this LogRecord. +func (ms LogRecord) DroppedAttributesCount() uint32 { + return ms.orig.DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this LogRecord. +func (ms LogRecord) SetDroppedAttributesCount(v uint32) { + ms.state.AssertMutable() + ms.orig.DroppedAttributesCount = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms LogRecord) CopyTo(dest LogRecord) { + dest.state.AssertMutable() + dest.SetObservedTimestamp(ms.ObservedTimestamp()) + dest.SetTimestamp(ms.Timestamp()) + dest.SetTraceID(ms.TraceID()) + dest.SetSpanID(ms.SpanID()) + dest.SetFlags(ms.Flags()) + dest.SetEventName(ms.EventName()) + dest.SetSeverityText(ms.SeverityText()) + dest.SetSeverityNumber(ms.SeverityNumber()) + ms.Body().CopyTo(dest.Body()) + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go new file mode 100644 index 00000000000..a900b4e1c7e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// LogRecordSlice logically represents a slice of LogRecord. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewLogRecordSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. 
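+// A hypothetical sketch tying the LogRecord accessors above to the slice
+// type that follows; "sl" is an assumed plog.ScopeLogs, and the pcommon
+// helpers (NewTimestampFromTime, Value.SetStr, Map.PutStr) come from the
+// vendored pcommon package:
+//
+//	rec := sl.LogRecords().AppendEmpty()
+//	rec.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
+//	rec.SetSeverityNumber(plog.SeverityNumberInfo)
+//	rec.SetSeverityText("INFO")
+//	rec.Body().SetStr("request handled")
+//	rec.Attributes().PutStr("component", "querier")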
+type LogRecordSlice struct { + orig *[]*otlplogs.LogRecord + state *internal.State +} + +func newLogRecordSlice(orig *[]*otlplogs.LogRecord, state *internal.State) LogRecordSlice { + return LogRecordSlice{orig: orig, state: state} +} + +// NewLogRecordSlice creates a LogRecordSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewLogRecordSlice() LogRecordSlice { + orig := []*otlplogs.LogRecord(nil) + state := internal.StateMutable + return newLogRecordSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLogRecordSlice()". +func (es LogRecordSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es LogRecordSlice) At(i int) LogRecord { + return newLogRecord((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new LogRecordSlice can be initialized: +// +// es := NewLogRecordSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es LogRecordSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlplogs.LogRecord, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty LogRecord. +// It returns the newly added LogRecord. +func (es LogRecordSlice) AppendEmpty() LogRecord { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlplogs.LogRecord{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es LogRecordSlice) MoveAndAppendTo(dest LogRecordSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es LogRecordSlice) CopyTo(dest LogRecordSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newLogRecord((*es.orig)[i], es.state).CopyTo(newLogRecord((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlplogs.LogRecord, srcLen) + wrappers := make([]*otlplogs.LogRecord, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newLogRecord((*es.orig)[i], es.state).CopyTo(newLogRecord(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the LogRecord elements within LogRecordSlice given the +// provided less function so that two instances of LogRecordSlice +// can be compared. +func (es LogRecordSlice) Sort(less func(a, b LogRecord) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go new file mode 100644 index 00000000000..1d240e03975 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceLogs is a collection of logs from a Resource. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceLogs function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceLogs struct { + orig *otlplogs.ResourceLogs + state *internal.State +} + +func newResourceLogs(orig *otlplogs.ResourceLogs, state *internal.State) ResourceLogs { + return ResourceLogs{orig: orig, state: state} +} + +// NewResourceLogs creates a new empty ResourceLogs. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewResourceLogs() ResourceLogs { + state := internal.StateMutable + return newResourceLogs(&otlplogs.ResourceLogs{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ResourceLogs) MoveTo(dest ResourceLogs) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlplogs.ResourceLogs{} +} + +// Resource returns the resource associated with this ResourceLogs. +func (ms ResourceLogs) Resource() pcommon.Resource { + return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ResourceLogs. +func (ms ResourceLogs) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ResourceLogs. +func (ms ResourceLogs) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// ScopeLogs returns the ScopeLogs associated with this ResourceLogs. 
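+// A hedged traversal sketch combining the ResourceLogs accessors above with
+// the LogRecordSlice.Sort helper defined earlier in this patch ("ld" is an
+// assumed plog.Logs; pcommon.Timestamp is an integer, so "<" is well defined):
+//
+//	for i := 0; i < ld.ResourceLogs().Len(); i++ {
+//		rl := ld.ResourceLogs().At(i)
+//		for j := 0; j < rl.ScopeLogs().Len(); j++ {
+//			rl.ScopeLogs().At(j).LogRecords().Sort(func(a, b plog.LogRecord) bool {
+//				return a.Timestamp() < b.Timestamp()
+//			})
+//		}
+//	}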
+func (ms ResourceLogs) ScopeLogs() ScopeLogsSlice { + return newScopeLogsSlice(&ms.orig.ScopeLogs, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ResourceLogs) CopyTo(dest ResourceLogs) { + dest.state.AssertMutable() + ms.Resource().CopyTo(dest.Resource()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.ScopeLogs().CopyTo(dest.ScopeLogs()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go new file mode 100644 index 00000000000..d2fc54de80b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// ResourceLogsSlice logically represents a slice of ResourceLogs. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewResourceLogsSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceLogsSlice struct { + orig *[]*otlplogs.ResourceLogs + state *internal.State +} + +func newResourceLogsSlice(orig *[]*otlplogs.ResourceLogs, state *internal.State) ResourceLogsSlice { + return ResourceLogsSlice{orig: orig, state: state} +} + +// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewResourceLogsSlice() ResourceLogsSlice { + orig := []*otlplogs.ResourceLogs(nil) + state := internal.StateMutable + return newResourceLogsSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceLogsSlice()". +func (es ResourceLogsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceLogsSlice) At(i int) ResourceLogs { + return newResourceLogs((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ResourceLogsSlice can be initialized: +// +// es := NewResourceLogsSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ResourceLogsSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlplogs.ResourceLogs, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ResourceLogs. +// It returns the newly added ResourceLogs. 
+func (es ResourceLogsSlice) AppendEmpty() ResourceLogs { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlplogs.ResourceLogs{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceLogsSlice) MoveAndAppendTo(dest ResourceLogsSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newResourceLogs((*es.orig)[i], es.state).CopyTo(newResourceLogs((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlplogs.ResourceLogs, srcLen) + wrappers := make([]*otlplogs.ResourceLogs, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newResourceLogs((*es.orig)[i], es.state).CopyTo(newResourceLogs(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ResourceLogs elements within ResourceLogsSlice given the +// provided less function so that two instances of ResourceLogsSlice +// can be compared. +func (es ResourceLogsSlice) Sort(less func(a, b ResourceLogs) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go new file mode 100644 index 00000000000..6e45a9627f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ScopeLogs is a collection of logs from a LibraryInstrumentation. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewScopeLogs function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeLogs struct { + orig *otlplogs.ScopeLogs + state *internal.State +} + +func newScopeLogs(orig *otlplogs.ScopeLogs, state *internal.State) ScopeLogs { + return ScopeLogs{orig: orig, state: state} +} + +// NewScopeLogs creates a new empty ScopeLogs. 
+// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewScopeLogs() ScopeLogs { + state := internal.StateMutable + return newScopeLogs(&otlplogs.ScopeLogs{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ScopeLogs) MoveTo(dest ScopeLogs) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlplogs.ScopeLogs{} +} + +// Scope returns the scope associated with this ScopeLogs. +func (ms ScopeLogs) Scope() pcommon.InstrumentationScope { + return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ScopeLogs. +func (ms ScopeLogs) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ScopeLogs. +func (ms ScopeLogs) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// LogRecords returns the LogRecords associated with this ScopeLogs. +func (ms ScopeLogs) LogRecords() LogRecordSlice { + return newLogRecordSlice(&ms.orig.LogRecords, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ScopeLogs) CopyTo(dest ScopeLogs) { + dest.state.AssertMutable() + ms.Scope().CopyTo(dest.Scope()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.LogRecords().CopyTo(dest.LogRecords()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go new file mode 100644 index 00000000000..5bae8d9f9c9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// ScopeLogsSlice logically represents a slice of ScopeLogs. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewScopeLogsSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeLogsSlice struct { + orig *[]*otlplogs.ScopeLogs + state *internal.State +} + +func newScopeLogsSlice(orig *[]*otlplogs.ScopeLogs, state *internal.State) ScopeLogsSlice { + return ScopeLogsSlice{orig: orig, state: state} +} + +// NewScopeLogsSlice creates a ScopeLogsSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewScopeLogsSlice() ScopeLogsSlice { + orig := []*otlplogs.ScopeLogs(nil) + state := internal.StateMutable + return newScopeLogsSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewScopeLogsSlice()". +func (es ScopeLogsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... 
// Do something with the element +// } +func (es ScopeLogsSlice) At(i int) ScopeLogs { + return newScopeLogs((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ScopeLogsSlice can be initialized: +// +// es := NewScopeLogsSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ScopeLogsSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlplogs.ScopeLogs, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ScopeLogs. +// It returns the newly added ScopeLogs. +func (es ScopeLogsSlice) AppendEmpty() ScopeLogs { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlplogs.ScopeLogs{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ScopeLogsSlice) MoveAndAppendTo(dest ScopeLogsSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ScopeLogsSlice) CopyTo(dest ScopeLogsSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newScopeLogs((*es.orig)[i], es.state).CopyTo(newScopeLogs((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlplogs.ScopeLogs, srcLen) + wrappers := make([]*otlplogs.ScopeLogs, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newScopeLogs((*es.orig)[i], es.state).CopyTo(newScopeLogs(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ScopeLogs elements within ScopeLogsSlice given the +// provided less function so that two instances of ScopeLogsSlice +// can be compared. 
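+// Sketch of RemoveIf from above: it filters in place without reallocating.
+// For example, dropping ScopeLogs entries that carry no records ("ld" is an
+// assumed plog.Logs):
+//
+//	rls := ld.ResourceLogs()
+//	for i := 0; i < rls.Len(); i++ {
+//		rls.At(i).ScopeLogs().RemoveIf(func(sl plog.ScopeLogs) bool {
+//			return sl.LogRecords().Len() == 0
+//		})
+//	}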
+func (es ScopeLogsSlice) Sort(less func(a, b ScopeLogs) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/json.go b/vendor/go.opentelemetry.io/collector/pdata/plog/json.go new file mode 100644 index 00000000000..373985e7090 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/json.go @@ -0,0 +1,135 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +import ( + "bytes" + "fmt" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +// JSONMarshaler marshals pdata.Logs to JSON bytes using the OTLP/JSON format. +type JSONMarshaler struct{} + +// MarshalLogs to the OTLP/JSON format. +func (*JSONMarshaler) MarshalLogs(ld Logs) ([]byte, error) { + buf := bytes.Buffer{} + pb := internal.LogsToProto(internal.Logs(ld)) + err := json.Marshal(&buf, &pb) + return buf.Bytes(), err +} + +var _ Unmarshaler = (*JSONUnmarshaler)(nil) + +// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Logs. +type JSONUnmarshaler struct{} + +// UnmarshalLogs from OTLP/JSON format into pdata.Logs. +func (*JSONUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) { + iter := jsoniter.ConfigFastest.BorrowIterator(buf) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + ld := NewLogs() + ld.unmarshalJsoniter(iter) + if iter.Error != nil { + return Logs{}, iter.Error + } + otlp.MigrateLogs(ld.getOrig().ResourceLogs) + return ld, nil +} + +func (ms Logs) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource_logs", "resourceLogs": + iter.ReadArrayCB(func(*jsoniter.Iterator) bool { + ms.ResourceLogs().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (ms ResourceLogs) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource": + json.ReadResource(iter, &ms.orig.Resource) + case "scope_logs", "scopeLogs": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.ScopeLogs().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + ms.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (ms ScopeLogs) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "scope": + json.ReadScope(iter, &ms.orig.Scope) + case "log_records", "logRecords": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.LogRecords().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + ms.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (ms LogRecord) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "timeUnixNano", "time_unix_nano": + ms.orig.TimeUnixNano = json.ReadUint64(iter) + case "observed_time_unix_nano", "observedTimeUnixNano": + ms.orig.ObservedTimeUnixNano = json.ReadUint64(iter) + case 
"severity_number", "severityNumber": + ms.orig.SeverityNumber = otlplogs.SeverityNumber(json.ReadEnumValue(iter, otlplogs.SeverityNumber_value)) + case "severity_text", "severityText": + ms.orig.SeverityText = iter.ReadString() + case "body": + json.ReadValue(iter, &ms.orig.Body) + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "droppedAttributesCount", "dropped_attributes_count": + ms.orig.DroppedAttributesCount = json.ReadUint32(iter) + case "flags": + ms.orig.Flags = json.ReadUint32(iter) + case "traceId", "trace_id": + if err := ms.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readLog.traceId", fmt.Sprintf("parse trace_id:%v", err)) + } + case "spanId", "span_id": + if err := ms.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readLog.spanId", fmt.Sprintf("parse span_id:%v", err)) + } + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/log_record_flags.go b/vendor/go.opentelemetry.io/collector/pdata/plog/log_record_flags.go new file mode 100644 index 00000000000..4a866bcf641 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/log_record_flags.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +const isSampledMask = uint32(1) + +var DefaultLogRecordFlags = LogRecordFlags(0) + +// LogRecordFlags defines flags for the LogRecord. The 8 least significant bits are the trace flags as +// defined in W3C Trace Context specification. 24 most significant bits are reserved and must be set to 0. +type LogRecordFlags uint32 + +// IsSampled returns true if the LogRecordFlags contains the IsSampled flag. +func (ms LogRecordFlags) IsSampled() bool { + return uint32(ms)&isSampledMask != 0 +} + +// WithIsSampled returns a new LogRecordFlags, with the IsSampled flag set to the given value. +func (ms LogRecordFlags) WithIsSampled(b bool) LogRecordFlags { + orig := uint32(ms) + if b { + orig |= isSampledMask + } else { + orig &^= isSampledMask + } + return LogRecordFlags(orig) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go new file mode 100644 index 00000000000..490526090f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" +) + +// Logs is the top-level struct that is propagated through the logs pipeline. +// Use NewLogs to create new instance, zero-initialized instance is not valid for use. +type Logs internal.Logs + +func newLogs(orig *otlpcollectorlog.ExportLogsServiceRequest) Logs { + state := internal.StateMutable + return Logs(internal.NewLogs(orig, &state)) +} + +func (ms Logs) getOrig() *otlpcollectorlog.ExportLogsServiceRequest { + return internal.GetOrigLogs(internal.Logs(ms)) +} + +func (ms Logs) getState() *internal.State { + return internal.GetLogsState(internal.Logs(ms)) +} + +// NewLogs creates a new Logs struct. 
+func NewLogs() Logs { + return newLogs(&otlpcollectorlog.ExportLogsServiceRequest{}) +} + +// IsReadOnly returns true if this Logs instance is read-only. +func (ms Logs) IsReadOnly() bool { + return *ms.getState() == internal.StateReadOnly +} + +// CopyTo copies the Logs instance overriding the destination. +func (ms Logs) CopyTo(dest Logs) { + ms.ResourceLogs().CopyTo(dest.ResourceLogs()) +} + +// LogRecordCount calculates the total number of log records. +func (ms Logs) LogRecordCount() int { + logCount := 0 + rss := ms.ResourceLogs() + for i := 0; i < rss.Len(); i++ { + rs := rss.At(i) + ill := rs.ScopeLogs() + for i := 0; i < ill.Len(); i++ { + logs := ill.At(i) + logCount += logs.LogRecords().Len() + } + } + return logCount +} + +// ResourceLogs returns the ResourceLogsSlice associated with this Logs. +func (ms Logs) ResourceLogs() ResourceLogsSlice { + return newResourceLogsSlice(&ms.getOrig().ResourceLogs, internal.GetLogsState(internal.Logs(ms))) +} + +// MarkReadOnly marks the Logs as shared so that no further modifications can be done on it. +func (ms Logs) MarkReadOnly() { + internal.SetLogsState(internal.Logs(ms), internal.StateReadOnly) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go b/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go new file mode 100644 index 00000000000..bb102591bf2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +var _ MarshalSizer = (*ProtoMarshaler)(nil) + +type ProtoMarshaler struct{} + +func (e *ProtoMarshaler) MarshalLogs(ld Logs) ([]byte, error) { + pb := internal.LogsToProto(internal.Logs(ld)) + return pb.Marshal() +} + +func (e *ProtoMarshaler) LogsSize(ld Logs) int { + pb := internal.LogsToProto(internal.Logs(ld)) + return pb.Size() +} + +var _ Unmarshaler = (*ProtoUnmarshaler)(nil) + +type ProtoUnmarshaler struct{} + +func (d *ProtoUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) { + pb := otlplogs.LogsData{} + err := pb.Unmarshal(buf) + return Logs(internal.LogsFromProto(pb)), err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go b/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go new file mode 100644 index 00000000000..53a9d4179c4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +import ( + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// SeverityNumber represents severity number of a log record. 
+type SeverityNumber int32 + +const ( + SeverityNumberUnspecified = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED) + SeverityNumberTrace = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE) + SeverityNumberTrace2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE2) + SeverityNumberTrace3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE3) + SeverityNumberTrace4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE4) + SeverityNumberDebug = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG) + SeverityNumberDebug2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG2) + SeverityNumberDebug3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG3) + SeverityNumberDebug4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG4) + SeverityNumberInfo = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO) + SeverityNumberInfo2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO2) + SeverityNumberInfo3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO3) + SeverityNumberInfo4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO4) + SeverityNumberWarn = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN) + SeverityNumberWarn2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN2) + SeverityNumberWarn3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN3) + SeverityNumberWarn4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN4) + SeverityNumberError = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR) + SeverityNumberError2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR2) + SeverityNumberError3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR3) + SeverityNumberError4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR4) + SeverityNumberFatal = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL) + SeverityNumberFatal2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL2) + SeverityNumberFatal3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL3) + SeverityNumberFatal4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL4) +) + +// String returns the string representation of the SeverityNumber. 
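+// Because the constants above mirror the protobuf enum ordering, severity
+// thresholds can be compared numerically ("rec" is an assumed
+// plog.LogRecord):
+//
+//	if rec.SeverityNumber() >= plog.SeverityNumberError {
+//		// Error, Error2-4, Fatal and Fatal2-4 all land here.
+//	}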
+func (sn SeverityNumber) String() string { + switch sn { + case SeverityNumberUnspecified: + return "Unspecified" + case SeverityNumberTrace: + return "Trace" + case SeverityNumberTrace2: + return "Trace2" + case SeverityNumberTrace3: + return "Trace3" + case SeverityNumberTrace4: + return "Trace4" + case SeverityNumberDebug: + return "Debug" + case SeverityNumberDebug2: + return "Debug2" + case SeverityNumberDebug3: + return "Debug3" + case SeverityNumberDebug4: + return "Debug4" + case SeverityNumberInfo: + return "Info" + case SeverityNumberInfo2: + return "Info2" + case SeverityNumberInfo3: + return "Info3" + case SeverityNumberInfo4: + return "Info4" + case SeverityNumberWarn: + return "Warn" + case SeverityNumberWarn2: + return "Warn2" + case SeverityNumberWarn3: + return "Warn3" + case SeverityNumberWarn4: + return "Warn4" + case SeverityNumberError: + return "Error" + case SeverityNumberError2: + return "Error2" + case SeverityNumberError3: + return "Error3" + case SeverityNumberError4: + return "Error4" + case SeverityNumberFatal: + return "Fatal" + case SeverityNumberFatal2: + return "Fatal2" + case SeverityNumberFatal3: + return "Fatal3" + case SeverityNumberFatal4: + return "Fatal4" + } + return "" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/encoding.go new file mode 100644 index 00000000000..8212a03e11c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/encoding.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" + +// MarshalSizer is the interface that groups the basic Marshal and Size methods +type MarshalSizer interface { + Marshaler + Sizer +} + +// Marshaler marshals pdata.Traces into bytes. +type Marshaler interface { + // MarshalTraces the given pdata.Traces into bytes. + // If the error is not nil, the returned bytes slice cannot be used. + MarshalTraces(td Traces) ([]byte, error) +} + +// Unmarshaler unmarshalls bytes into pdata.Traces. +type Unmarshaler interface { + // UnmarshalTraces the given bytes into pdata.Traces. + // If the error is not nil, the returned pdata.Traces cannot be used. + UnmarshalTraces(buf []byte) (Traces, error) +} + +// Sizer is an optional interface implemented by the Marshaler, +// that calculates the size of a marshaled Traces. +type Sizer interface { + // TracesSize returns the size in bytes of a marshaled Traces. + TracesSize(td Traces) int +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go new file mode 100644 index 00000000000..fc2ed01dbbc --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package ptrace + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceSpans is a collection of spans from a Resource. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. 
+// +// Must use NewResourceSpans function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceSpans struct { + orig *otlptrace.ResourceSpans + state *internal.State +} + +func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) ResourceSpans { + return ResourceSpans{orig: orig, state: state} +} + +// NewResourceSpans creates a new empty ResourceSpans. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewResourceSpans() ResourceSpans { + state := internal.StateMutable + return newResourceSpans(&otlptrace.ResourceSpans{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ResourceSpans) MoveTo(dest ResourceSpans) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlptrace.ResourceSpans{} +} + +// Resource returns the resource associated with this ResourceSpans. +func (ms ResourceSpans) Resource() pcommon.Resource { + return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ResourceSpans. +func (ms ResourceSpans) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ResourceSpans. +func (ms ResourceSpans) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// ScopeSpans returns the ScopeSpans associated with this ResourceSpans. +func (ms ResourceSpans) ScopeSpans() ScopeSpansSlice { + return newScopeSpansSlice(&ms.orig.ScopeSpans, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ResourceSpans) CopyTo(dest ResourceSpans) { + dest.state.AssertMutable() + ms.Resource().CopyTo(dest.Resource()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.ScopeSpans().CopyTo(dest.ScopeSpans()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go new file mode 100644 index 00000000000..da79ef4a342 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package ptrace + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" +) + +// ResourceSpansSlice logically represents a slice of ResourceSpans. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewResourceSpansSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceSpansSlice struct { + orig *[]*otlptrace.ResourceSpans + state *internal.State +} + +func newResourceSpansSlice(orig *[]*otlptrace.ResourceSpans, state *internal.State) ResourceSpansSlice { + return ResourceSpansSlice{orig: orig, state: state} +} + +// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. 
+func NewResourceSpansSlice() ResourceSpansSlice { + orig := []*otlptrace.ResourceSpans(nil) + state := internal.StateMutable + return newResourceSpansSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceSpansSlice()". +func (es ResourceSpansSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceSpansSlice) At(i int) ResourceSpans { + return newResourceSpans((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ResourceSpansSlice can be initialized: +// +// es := NewResourceSpansSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ResourceSpansSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlptrace.ResourceSpans, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ResourceSpans. +// It returns the newly added ResourceSpans. +func (es ResourceSpansSlice) AppendEmpty() ResourceSpans { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlptrace.ResourceSpans{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceSpansSlice) MoveAndAppendTo(dest ResourceSpansSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newResourceSpans((*es.orig)[i], es.state).CopyTo(newResourceSpans((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlptrace.ResourceSpans, srcLen) + wrappers := make([]*otlptrace.ResourceSpans, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newResourceSpans((*es.orig)[i], es.state).CopyTo(newResourceSpans(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ResourceSpans elements within ResourceSpansSlice given the +// provided less function so that two instances of ResourceSpansSlice +// can be compared. +func (es ResourceSpansSlice) Sort(less func(a, b ResourceSpans) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go new file mode 100644 index 00000000000..6ea0d82fd68 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package ptrace + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ScopeSpans is a collection of spans from a LibraryInstrumentation. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewScopeSpans function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeSpans struct { + orig *otlptrace.ScopeSpans + state *internal.State +} + +func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans { + return ScopeSpans{orig: orig, state: state} +} + +// NewScopeSpans creates a new empty ScopeSpans. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewScopeSpans() ScopeSpans { + state := internal.StateMutable + return newScopeSpans(&otlptrace.ScopeSpans{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ScopeSpans) MoveTo(dest ScopeSpans) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlptrace.ScopeSpans{} +} + +// Scope returns the scope associated with this ScopeSpans. +func (ms ScopeSpans) Scope() pcommon.InstrumentationScope { + return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ScopeSpans. +func (ms ScopeSpans) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ScopeSpans. +func (ms ScopeSpans) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// Spans returns the Spans associated with this ScopeSpans. 
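+// Sketch contrasting the transfer helpers defined above: CopyTo deep-copies
+// into dest, while MoveAndAppendTo hands off the backing array and clears
+// the source ("src" and "dst" are assumed ptrace.Traces values):
+//
+//	src.ResourceSpans().MoveAndAppendTo(dst.ResourceSpans())
+//	// src.ResourceSpans().Len() is now 0; dst owns the moved elements.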
+func (ms ScopeSpans) Spans() SpanSlice { + return newSpanSlice(&ms.orig.Spans, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ScopeSpans) CopyTo(dest ScopeSpans) { + dest.state.AssertMutable() + ms.Scope().CopyTo(dest.Scope()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.Spans().CopyTo(dest.Spans()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go new file mode 100644 index 00000000000..8fd0b4b8e99 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package ptrace + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" +) + +// ScopeSpansSlice logically represents a slice of ScopeSpans. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewScopeSpansSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeSpansSlice struct { + orig *[]*otlptrace.ScopeSpans + state *internal.State +} + +func newScopeSpansSlice(orig *[]*otlptrace.ScopeSpans, state *internal.State) ScopeSpansSlice { + return ScopeSpansSlice{orig: orig, state: state} +} + +// NewScopeSpansSlice creates a ScopeSpansSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewScopeSpansSlice() ScopeSpansSlice { + orig := []*otlptrace.ScopeSpans(nil) + state := internal.StateMutable + return newScopeSpansSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewScopeSpansSlice()". +func (es ScopeSpansSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ScopeSpansSlice) At(i int) ScopeSpans { + return newScopeSpans((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ScopeSpansSlice can be initialized: +// +// es := NewScopeSpansSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ScopeSpansSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlptrace.ScopeSpans, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ScopeSpans. +// It returns the newly added ScopeSpans. 
+func (es ScopeSpansSlice) AppendEmpty() ScopeSpans { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlptrace.ScopeSpans{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ScopeSpansSlice) MoveAndAppendTo(dest ScopeSpansSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ScopeSpansSlice) CopyTo(dest ScopeSpansSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newScopeSpans((*es.orig)[i], es.state).CopyTo(newScopeSpans((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlptrace.ScopeSpans, srcLen) + wrappers := make([]*otlptrace.ScopeSpans, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newScopeSpans((*es.orig)[i], es.state).CopyTo(newScopeSpans(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ScopeSpans elements within ScopeSpansSlice given the +// provided less function so that two instances of ScopeSpansSlice +// can be compared. +func (es ScopeSpansSlice) Sort(less func(a, b ScopeSpans) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go new file mode 100644 index 00000000000..56a701974c4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go @@ -0,0 +1,216 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package ptrace + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Span represents a single operation within a trace. +// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSpan function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type Span struct { + orig *otlptrace.Span + state *internal.State +} + +func newSpan(orig *otlptrace.Span, state *internal.State) Span { + return Span{orig: orig, state: state} +} + +// NewSpan creates a new empty Span. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSpan() Span { + state := internal.StateMutable + return newSpan(&otlptrace.Span{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Span) MoveTo(dest Span) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlptrace.Span{} +} + +// TraceID returns the traceid associated with this Span. +func (ms Span) TraceID() pcommon.TraceID { + return pcommon.TraceID(ms.orig.TraceId) +} + +// SetTraceID replaces the traceid associated with this Span. +func (ms Span) SetTraceID(v pcommon.TraceID) { + ms.state.AssertMutable() + ms.orig.TraceId = data.TraceID(v) +} + +// SpanID returns the spanid associated with this Span. +func (ms Span) SpanID() pcommon.SpanID { + return pcommon.SpanID(ms.orig.SpanId) +} + +// SetSpanID replaces the spanid associated with this Span. +func (ms Span) SetSpanID(v pcommon.SpanID) { + ms.state.AssertMutable() + ms.orig.SpanId = data.SpanID(v) +} + +// TraceState returns the tracestate associated with this Span. +func (ms Span) TraceState() pcommon.TraceState { + return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state)) +} + +// ParentSpanID returns the parentspanid associated with this Span. +func (ms Span) ParentSpanID() pcommon.SpanID { + return pcommon.SpanID(ms.orig.ParentSpanId) +} + +// SetParentSpanID replaces the parentspanid associated with this Span. +func (ms Span) SetParentSpanID(v pcommon.SpanID) { + ms.state.AssertMutable() + ms.orig.ParentSpanId = data.SpanID(v) +} + +// Name returns the name associated with this Span. +func (ms Span) Name() string { + return ms.orig.Name +} + +// SetName replaces the name associated with this Span. +func (ms Span) SetName(v string) { + ms.state.AssertMutable() + ms.orig.Name = v +} + +// Flags returns the flags associated with this Span. +func (ms Span) Flags() uint32 { + return ms.orig.Flags +} + +// SetFlags replaces the flags associated with this Span. +func (ms Span) SetFlags(v uint32) { + ms.state.AssertMutable() + ms.orig.Flags = v +} + +// Kind returns the kind associated with this Span. +func (ms Span) Kind() SpanKind { + return SpanKind(ms.orig.Kind) +} + +// SetKind replaces the kind associated with this Span. +func (ms Span) SetKind(v SpanKind) { + ms.state.AssertMutable() + ms.orig.Kind = otlptrace.Span_SpanKind(v) +} + +// StartTimestamp returns the starttimestamp associated with this Span. +func (ms Span) StartTimestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.StartTimeUnixNano) +} + +// SetStartTimestamp replaces the starttimestamp associated with this Span. +func (ms Span) SetStartTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.StartTimeUnixNano = uint64(v) +} + +// EndTimestamp returns the endtimestamp associated with this Span. +func (ms Span) EndTimestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.EndTimeUnixNano) +} + +// SetEndTimestamp replaces the endtimestamp associated with this Span. 
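// Editor's sketch (illustration only, not part of the vendored files): filling
// the scalar Span fields whose generated accessors appear above. Assumes
// imports "time", "go.opentelemetry.io/collector/pdata/pcommon" and this
// ptrace package; all values are made up.
//
//	func fillSpan(span ptrace.Span) {
//		span.SetTraceID(pcommon.TraceID([16]byte{0x01, 0x02})) // example ID
//		span.SetSpanID(pcommon.SpanID([8]byte{0x0a, 0x0b}))    // example ID
//		span.SetName("GET /users")
//		span.SetKind(ptrace.SpanKindServer)
//		start := time.Now()
//		span.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
//		span.SetEndTimestamp(pcommon.NewTimestampFromTime(start.Add(10 * time.Millisecond)))
//	}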
+func (ms Span) SetEndTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.EndTimeUnixNano = uint64(v) +} + +// Attributes returns the Attributes associated with this Span. +func (ms Span) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this Span. +func (ms Span) DroppedAttributesCount() uint32 { + return ms.orig.DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this Span. +func (ms Span) SetDroppedAttributesCount(v uint32) { + ms.state.AssertMutable() + ms.orig.DroppedAttributesCount = v +} + +// Events returns the Events associated with this Span. +func (ms Span) Events() SpanEventSlice { + return newSpanEventSlice(&ms.orig.Events, ms.state) +} + +// DroppedEventsCount returns the droppedeventscount associated with this Span. +func (ms Span) DroppedEventsCount() uint32 { + return ms.orig.DroppedEventsCount +} + +// SetDroppedEventsCount replaces the droppedeventscount associated with this Span. +func (ms Span) SetDroppedEventsCount(v uint32) { + ms.state.AssertMutable() + ms.orig.DroppedEventsCount = v +} + +// Links returns the Links associated with this Span. +func (ms Span) Links() SpanLinkSlice { + return newSpanLinkSlice(&ms.orig.Links, ms.state) +} + +// DroppedLinksCount returns the droppedlinkscount associated with this Span. +func (ms Span) DroppedLinksCount() uint32 { + return ms.orig.DroppedLinksCount +} + +// SetDroppedLinksCount replaces the droppedlinkscount associated with this Span. +func (ms Span) SetDroppedLinksCount(v uint32) { + ms.state.AssertMutable() + ms.orig.DroppedLinksCount = v +} + +// Status returns the status associated with this Span. +func (ms Span) Status() Status { + return newStatus(&ms.orig.Status, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Span) CopyTo(dest Span) { + dest.state.AssertMutable() + dest.SetTraceID(ms.TraceID()) + dest.SetSpanID(ms.SpanID()) + ms.TraceState().CopyTo(dest.TraceState()) + dest.SetParentSpanID(ms.ParentSpanID()) + dest.SetName(ms.Name()) + dest.SetFlags(ms.Flags()) + dest.SetKind(ms.Kind()) + dest.SetStartTimestamp(ms.StartTimestamp()) + dest.SetEndTimestamp(ms.EndTimestamp()) + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) + ms.Events().CopyTo(dest.Events()) + dest.SetDroppedEventsCount(ms.DroppedEventsCount()) + ms.Links().CopyTo(dest.Links()) + dest.SetDroppedLinksCount(ms.DroppedLinksCount()) + ms.Status().CopyTo(dest.Status()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go new file mode 100644 index 00000000000..a35c88ae93d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package ptrace + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. 
See OTLP for event definition. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSpanEvent function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SpanEvent struct { + orig *otlptrace.Span_Event + state *internal.State +} + +func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent { + return SpanEvent{orig: orig, state: state} +} + +// NewSpanEvent creates a new empty SpanEvent. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSpanEvent() SpanEvent { + state := internal.StateMutable + return newSpanEvent(&otlptrace.Span_Event{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms SpanEvent) MoveTo(dest SpanEvent) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlptrace.Span_Event{} +} + +// Timestamp returns the timestamp associated with this SpanEvent. +func (ms SpanEvent) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this SpanEvent. +func (ms SpanEvent) SetTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeUnixNano = uint64(v) +} + +// Name returns the name associated with this SpanEvent. +func (ms SpanEvent) Name() string { + return ms.orig.Name +} + +// SetName replaces the name associated with this SpanEvent. +func (ms SpanEvent) SetName(v string) { + ms.state.AssertMutable() + ms.orig.Name = v +} + +// Attributes returns the Attributes associated with this SpanEvent. +func (ms SpanEvent) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent. +func (ms SpanEvent) DroppedAttributesCount() uint32 { + return ms.orig.DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanEvent. +func (ms SpanEvent) SetDroppedAttributesCount(v uint32) { + ms.state.AssertMutable() + ms.orig.DroppedAttributesCount = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms SpanEvent) CopyTo(dest SpanEvent) { + dest.state.AssertMutable() + dest.SetTimestamp(ms.Timestamp()) + dest.SetName(ms.Name()) + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go new file mode 100644 index 00000000000..ffde17c83a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package ptrace + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" +) + +// SpanEventSlice logically represents a slice of SpanEvent. +// +// This is a reference type. 
If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewSpanEventSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type SpanEventSlice struct {
+ orig *[]*otlptrace.Span_Event
+ state *internal.State
+}
+
+func newSpanEventSlice(orig *[]*otlptrace.Span_Event, state *internal.State) SpanEventSlice {
+ return SpanEventSlice{orig: orig, state: state}
+}
+
+// NewSpanEventSlice creates a SpanEventSlice with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewSpanEventSlice() SpanEventSlice {
+ orig := []*otlptrace.Span_Event(nil)
+ state := internal.StateMutable
+ return newSpanEventSlice(&orig, &state)
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewSpanEventSlice()".
+func (es SpanEventSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es SpanEventSlice) At(i int) SpanEvent {
+ return newSpanEvent((*es.orig)[i], es.state)
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new SpanEventSlice can be initialized:
+//
+// es := NewSpanEventSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es SpanEventSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*otlptrace.Span_Event, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty SpanEvent.
+// It returns the newly added SpanEvent.
+func (es SpanEventSlice) AppendEmpty() SpanEvent {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, &otlptrace.Span_Event{})
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es SpanEventSlice) MoveAndAppendTo(dest SpanEventSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
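// Editor's sketch (illustration only, not part of the vendored files): RemoveIf,
// documented above, is the idiomatic way to filter a pdata slice in place. This
// hypothetical helper drops oversized events and records them in the span's
// dropped-events counter; the size limit is an assumption.
//
//	func dropLargeEvents(span ptrace.Span) {
//		dropped := uint32(0)
//		span.Events().RemoveIf(func(e ptrace.SpanEvent) bool {
//			if e.Attributes().Len() > 16 { // assumed size limit
//				dropped++
//				return true
//			}
//			return false
//		})
//		span.SetDroppedEventsCount(span.DroppedEventsCount() + dropped)
//	}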
+func (es SpanEventSlice) CopyTo(dest SpanEventSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSpanEvent((*es.orig)[i], es.state).CopyTo(newSpanEvent((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlptrace.Span_Event, srcLen) + wrappers := make([]*otlptrace.Span_Event, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSpanEvent((*es.orig)[i], es.state).CopyTo(newSpanEvent(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the SpanEvent elements within SpanEventSlice given the +// provided less function so that two instances of SpanEventSlice +// can be compared. +func (es SpanEventSlice) Sort(less func(a, b SpanEvent) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go new file mode 100644 index 00000000000..3121e263e23 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package ptrace + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// SpanLink is a pointer from the current span to another span in the same trace or in a +// different trace. +// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSpanLink function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SpanLink struct { + orig *otlptrace.Span_Link + state *internal.State +} + +func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink { + return SpanLink{orig: orig, state: state} +} + +// NewSpanLink creates a new empty SpanLink. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSpanLink() SpanLink { + state := internal.StateMutable + return newSpanLink(&otlptrace.Span_Link{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms SpanLink) MoveTo(dest SpanLink) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlptrace.Span_Link{} +} + +// TraceID returns the traceid associated with this SpanLink. +func (ms SpanLink) TraceID() pcommon.TraceID { + return pcommon.TraceID(ms.orig.TraceId) +} + +// SetTraceID replaces the traceid associated with this SpanLink. +func (ms SpanLink) SetTraceID(v pcommon.TraceID) { + ms.state.AssertMutable() + ms.orig.TraceId = data.TraceID(v) +} + +// SpanID returns the spanid associated with this SpanLink. 
+func (ms SpanLink) SpanID() pcommon.SpanID {
+ return pcommon.SpanID(ms.orig.SpanId)
+}
+
+// SetSpanID replaces the spanid associated with this SpanLink.
+func (ms SpanLink) SetSpanID(v pcommon.SpanID) {
+ ms.state.AssertMutable()
+ ms.orig.SpanId = data.SpanID(v)
+}
+
+// TraceState returns the tracestate associated with this SpanLink.
+func (ms SpanLink) TraceState() pcommon.TraceState {
+ return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state))
+}
+
+// Flags returns the flags associated with this SpanLink.
+func (ms SpanLink) Flags() uint32 {
+ return ms.orig.Flags
+}
+
+// SetFlags replaces the flags associated with this SpanLink.
+func (ms SpanLink) SetFlags(v uint32) {
+ ms.state.AssertMutable()
+ ms.orig.Flags = v
+}
+
+// Attributes returns the Attributes associated with this SpanLink.
+func (ms SpanLink) Attributes() pcommon.Map {
+ return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+}
+
+// DroppedAttributesCount returns the droppedattributescount associated with this SpanLink.
+func (ms SpanLink) DroppedAttributesCount() uint32 {
+ return ms.orig.DroppedAttributesCount
+}
+
+// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanLink.
+func (ms SpanLink) SetDroppedAttributesCount(v uint32) {
+ ms.state.AssertMutable()
+ ms.orig.DroppedAttributesCount = v
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms SpanLink) CopyTo(dest SpanLink) {
+ dest.state.AssertMutable()
+ dest.SetTraceID(ms.TraceID())
+ dest.SetSpanID(ms.SpanID())
+ ms.TraceState().CopyTo(dest.TraceState())
+ dest.SetFlags(ms.Flags())
+ ms.Attributes().CopyTo(dest.Attributes())
+ dest.SetDroppedAttributesCount(ms.DroppedAttributesCount())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
new file mode 100644
index 00000000000..164038b8bed
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
@@ -0,0 +1,152 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package ptrace
+
+import (
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+ otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+)
+
+// SpanLinkSlice logically represents a slice of SpanLink.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewSpanLinkSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type SpanLinkSlice struct {
+ orig *[]*otlptrace.Span_Link
+ state *internal.State
+}
+
+func newSpanLinkSlice(orig *[]*otlptrace.Span_Link, state *internal.State) SpanLinkSlice {
+ return SpanLinkSlice{orig: orig, state: state}
+}
+
+// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewSpanLinkSlice() SpanLinkSlice {
+ orig := []*otlptrace.Span_Link(nil)
+ state := internal.StateMutable
+ return newSpanLinkSlice(&orig, &state)
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewSpanLinkSlice()".
+func (es SpanLinkSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es SpanLinkSlice) At(i int) SpanLink { + return newSpanLink((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new SpanLinkSlice can be initialized: +// +// es := NewSpanLinkSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es SpanLinkSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlptrace.Span_Link, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty SpanLink. +// It returns the newly added SpanLink. +func (es SpanLinkSlice) AppendEmpty() SpanLink { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlptrace.Span_Link{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es SpanLinkSlice) MoveAndAppendTo(dest SpanLinkSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSpanLink((*es.orig)[i], es.state).CopyTo(newSpanLink((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlptrace.Span_Link, srcLen) + wrappers := make([]*otlptrace.Span_Link, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSpanLink((*es.orig)[i], es.state).CopyTo(newSpanLink(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the SpanLink elements within SpanLinkSlice given the +// provided less function so that two instances of SpanLinkSlice +// can be compared. 
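// Editor's sketch (illustration only, not part of the vendored files): links are
// added through Span.Links().AppendEmpty() (a SpanLinkSlice, above) and then
// populated with the SpanLink setters, typically pointing at another span's
// identity. The attribute key/value is a made-up example.
//
//	func addLink(from, to ptrace.Span) {
//		link := from.Links().AppendEmpty()
//		link.SetTraceID(to.TraceID())
//		link.SetSpanID(to.SpanID())
//		link.Attributes().PutStr("link.kind", "follows-from") // example attribute
//	}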
+func (es SpanLinkSlice) Sort(less func(a, b SpanLink) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
new file mode 100644
index 00000000000..654a547523f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
@@ -0,0 +1,152 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package ptrace
+
+import (
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+ otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+)
+
+// SpanSlice logically represents a slice of Span.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewSpanSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type SpanSlice struct {
+ orig *[]*otlptrace.Span
+ state *internal.State
+}
+
+func newSpanSlice(orig *[]*otlptrace.Span, state *internal.State) SpanSlice {
+ return SpanSlice{orig: orig, state: state}
+}
+
+// NewSpanSlice creates a SpanSlice with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewSpanSlice() SpanSlice {
+ orig := []*otlptrace.Span(nil)
+ state := internal.StateMutable
+ return newSpanSlice(&orig, &state)
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewSpanSlice()".
+func (es SpanSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es SpanSlice) At(i int) Span {
+ return newSpan((*es.orig)[i], es.state)
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new SpanSlice can be initialized:
+//
+// es := NewSpanSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es SpanSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*otlptrace.Span, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Span.
+// It returns the newly added Span.
+func (es SpanSlice) AppendEmpty() Span {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, &otlptrace.Span{})
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es SpanSlice) MoveAndAppendTo(dest SpanSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es SpanSlice) RemoveIf(f func(Span) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es SpanSlice) CopyTo(dest SpanSlice) {
+ dest.state.AssertMutable()
+ srcLen := es.Len()
+ destCap := cap(*dest.orig)
+ if srcLen <= destCap {
+ (*dest.orig) = (*dest.orig)[:srcLen:destCap]
+ for i := range *es.orig {
+ newSpan((*es.orig)[i], es.state).CopyTo(newSpan((*dest.orig)[i], dest.state))
+ }
+ return
+ }
+ origs := make([]otlptrace.Span, srcLen)
+ wrappers := make([]*otlptrace.Span, srcLen)
+ for i := range *es.orig {
+ wrappers[i] = &origs[i]
+ newSpan((*es.orig)[i], es.state).CopyTo(newSpan(wrappers[i], dest.state))
+ }
+ *dest.orig = wrappers
+}
+
+// Sort sorts the Span elements within SpanSlice given the
+// provided less function so that two instances of SpanSlice
+// can be compared.
+func (es SpanSlice) Sort(less func(a, b Span) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
new file mode 100644
index 00000000000..2b3e66a92d0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
@@ -0,0 +1,76 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package ptrace
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+)
+
+// Status is an optional final status for this span. Semantically, when Status was not
+// set, that means the span ended without errors and Status.Ok (code = 0) is to be assumed.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewStatus function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Status struct {
+ orig *otlptrace.Status
+ state *internal.State
+}
+
+func newStatus(orig *otlptrace.Status, state *internal.State) Status {
+ return Status{orig: orig, state: state}
+}
+
+// NewStatus creates a new empty Status.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewStatus() Status {
+ state := internal.StateMutable
+ return newStatus(&otlptrace.Status{}, &state)
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Status) MoveTo(dest Status) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ *dest.orig = *ms.orig
+ *ms.orig = otlptrace.Status{}
+}
+
+// Code returns the code associated with this Status.
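// Editor's sketch (illustration only, not part of the vendored files):
// SpanSlice.Sort, shown above, takes an arbitrary less function; ordering by
// start timestamp is a common choice since pcommon.Timestamp values compare as
// plain integers.
//
//	func sortByStart(spans ptrace.SpanSlice) {
//		spans.Sort(func(a, b ptrace.Span) bool {
//			return a.StartTimestamp() < b.StartTimestamp()
//		})
//	}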
+func (ms Status) Code() StatusCode { + return StatusCode(ms.orig.Code) +} + +// SetCode replaces the code associated with this Status. +func (ms Status) SetCode(v StatusCode) { + ms.state.AssertMutable() + ms.orig.Code = otlptrace.Status_StatusCode(v) +} + +// Message returns the message associated with this Status. +func (ms Status) Message() string { + return ms.orig.Message +} + +// SetMessage replaces the message associated with this Status. +func (ms Status) SetMessage(v string) { + ms.state.AssertMutable() + ms.orig.Message = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Status) CopyTo(dest Status) { + dest.state.AssertMutable() + dest.SetCode(ms.Code()) + dest.SetMessage(ms.Message()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go new file mode 100644 index 00000000000..2e35a95913a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go @@ -0,0 +1,217 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" + +import ( + "bytes" + "fmt" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +// JSONMarshaler marshals pdata.Traces to JSON bytes using the OTLP/JSON format. +type JSONMarshaler struct{} + +// MarshalTraces to the OTLP/JSON format. +func (*JSONMarshaler) MarshalTraces(td Traces) ([]byte, error) { + buf := bytes.Buffer{} + pb := internal.TracesToProto(internal.Traces(td)) + err := json.Marshal(&buf, &pb) + return buf.Bytes(), err +} + +// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Traces. +type JSONUnmarshaler struct{} + +// UnmarshalTraces from OTLP/JSON format into pdata.Traces. 
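// Editor's sketch (illustration only, not part of the vendored files): a round
// trip through the OTLP/JSON marshaler and unmarshaler declared above.
//
//	func jsonRoundTrip(td ptrace.Traces) (ptrace.Traces, error) {
//		var m ptrace.JSONMarshaler
//		data, err := m.MarshalTraces(td)
//		if err != nil {
//			return ptrace.Traces{}, err
//		}
//		var u ptrace.JSONUnmarshaler
//		return u.UnmarshalTraces(data)
//	}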
+func (*JSONUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) { + iter := jsoniter.ConfigFastest.BorrowIterator(buf) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + td := NewTraces() + td.unmarshalJsoniter(iter) + if iter.Error != nil { + return Traces{}, iter.Error + } + otlp.MigrateTraces(td.getOrig().ResourceSpans) + return td, nil +} + +func (ms Traces) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resourceSpans", "resource_spans": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.ResourceSpans().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (ms ResourceSpans) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource": + json.ReadResource(iter, internal.GetOrigResource(internal.Resource(ms.Resource()))) + case "scopeSpans", "scope_spans": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.ScopeSpans().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + ms.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (ms ScopeSpans) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "scope": + json.ReadScope(iter, &ms.orig.Scope) + case "spans": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.Spans().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + ms.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (dest Span) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "traceId", "trace_id": + if err := dest.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readSpan.traceId", fmt.Sprintf("parse trace_id:%v", err)) + } + case "spanId", "span_id": + if err := dest.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readSpan.spanId", fmt.Sprintf("parse span_id:%v", err)) + } + case "traceState", "trace_state": + dest.TraceState().FromRaw(iter.ReadString()) + case "parentSpanId", "parent_span_id": + if err := dest.orig.ParentSpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readSpan.parentSpanId", fmt.Sprintf("parse parent_span_id:%v", err)) + } + case "flags": + dest.orig.Flags = json.ReadUint32(iter) + case "name": + dest.orig.Name = iter.ReadString() + case "kind": + dest.orig.Kind = otlptrace.Span_SpanKind(json.ReadEnumValue(iter, otlptrace.Span_SpanKind_value)) + case "startTimeUnixNano", "start_time_unix_nano": + dest.orig.StartTimeUnixNano = json.ReadUint64(iter) + case "endTimeUnixNano", "end_time_unix_nano": + dest.orig.EndTimeUnixNano = json.ReadUint64(iter) + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "droppedAttributesCount", "dropped_attributes_count": + dest.orig.DroppedAttributesCount = json.ReadUint32(iter) + case "events": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + dest.Events().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "droppedEventsCount", "dropped_events_count": + dest.orig.DroppedEventsCount = 
json.ReadUint32(iter) + case "links": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + dest.Links().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "droppedLinksCount", "dropped_links_count": + dest.orig.DroppedLinksCount = json.ReadUint32(iter) + case "status": + dest.Status().unmarshalJsoniter(iter) + default: + iter.Skip() + } + return true + }) +} + +func (dest Status) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "message": + dest.orig.Message = iter.ReadString() + case "code": + dest.orig.Code = otlptrace.Status_StatusCode(json.ReadEnumValue(iter, otlptrace.Status_StatusCode_value)) + default: + iter.Skip() + } + return true + }) +} + +func (dest SpanLink) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "traceId", "trace_id": + if err := dest.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readSpanLink", fmt.Sprintf("parse trace_id:%v", err)) + } + case "spanId", "span_id": + if err := dest.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readSpanLink", fmt.Sprintf("parse span_id:%v", err)) + } + case "traceState", "trace_state": + dest.orig.TraceState = iter.ReadString() + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "droppedAttributesCount", "dropped_attributes_count": + dest.orig.DroppedAttributesCount = json.ReadUint32(iter) + case "flags": + dest.orig.Flags = json.ReadUint32(iter) + default: + iter.Skip() + } + return true + }) +} + +func (dest SpanEvent) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "timeUnixNano", "time_unix_nano": + dest.orig.TimeUnixNano = json.ReadUint64(iter) + case "name": + dest.orig.Name = iter.ReadString() + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "droppedAttributesCount", "dropped_attributes_count": + dest.orig.DroppedAttributesCount = json.ReadUint32(iter) + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go new file mode 100644 index 00000000000..e0b2168884a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" +) + +var _ MarshalSizer = (*ProtoMarshaler)(nil) + +type ProtoMarshaler struct{} + +func (e *ProtoMarshaler) MarshalTraces(td Traces) ([]byte, error) { + pb := internal.TracesToProto(internal.Traces(td)) + return pb.Marshal() +} + +func (e *ProtoMarshaler) TracesSize(td Traces) int { + pb := internal.TracesToProto(internal.Traces(td)) + return pb.Size() +} + +type ProtoUnmarshaler struct{} + +func (d *ProtoUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) { + pb := otlptrace.TracesData{} + err := pb.Unmarshal(buf) + return Traces(internal.TracesFromProto(pb)), 
err
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
new file mode 100644
index 00000000000..561d82cfffa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
@@ -0,0 +1,54 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
+
+import (
+ otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+ // SpanKindUnspecified represents that the SpanKind is unspecified, it MUST NOT be used.
+ SpanKindUnspecified = SpanKind(otlptrace.Span_SPAN_KIND_UNSPECIFIED)
+ // SpanKindInternal indicates that the span represents an internal operation within an application,
+ // as opposed to an operation happening at the boundaries. Default value.
+ SpanKindInternal = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL)
+ // SpanKindServer indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ SpanKindServer = SpanKind(otlptrace.Span_SPAN_KIND_SERVER)
+ // SpanKindClient indicates that the span describes a request to some remote service.
+ SpanKindClient = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT)
+ // SpanKindProducer indicates that the span describes a producer sending a message to a broker.
+ // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+ // between producer and consumer spans.
+ // A PRODUCER span ends when the message was accepted by the broker while the logical processing of
+ // the message might span a much longer time.
+ SpanKindProducer = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER)
+ // SpanKindConsumer indicates that the span describes a consumer receiving a message from a broker.
+ // Like the PRODUCER kind, there is often no direct critical path latency relationship between
+ // producer and consumer spans.
+ SpanKindConsumer = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER)
+)
+
+// String returns the string representation of the SpanKind.
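// Editor's sketch (illustration only, not part of the vendored files):
// ProtoMarshaler (declared in pb.go above) also exposes TracesSize, so callers
// can check the exact wire size before paying for the marshal. The 1 MiB cap is
// a made-up example; assumes import "errors".
//
//	func protoEncode(td ptrace.Traces) ([]byte, error) {
//		var m ptrace.ProtoMarshaler
//		if m.TracesSize(td) > 1<<20 { // hypothetical batch limit
//			return nil, errors.New("trace batch too large")
//		}
//		return m.MarshalTraces(td)
//	}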
+func (sk SpanKind) String() string { + switch sk { + case SpanKindUnspecified: + return "Unspecified" + case SpanKindInternal: + return "Internal" + case SpanKindServer: + return "Server" + case SpanKindClient: + return "Client" + case SpanKindProducer: + return "Producer" + case SpanKindConsumer: + return "Consumer" + } + return "" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go new file mode 100644 index 00000000000..18a21f56ba8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" + +import ( + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" +) + +// StatusCode mirrors the codes defined at +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + StatusCodeUnset = StatusCode(otlptrace.Status_STATUS_CODE_UNSET) + StatusCodeOk = StatusCode(otlptrace.Status_STATUS_CODE_OK) + StatusCodeError = StatusCode(otlptrace.Status_STATUS_CODE_ERROR) +) + +// String returns the string representation of the StatusCode. +func (sc StatusCode) String() string { + switch sc { + case StatusCodeUnset: + return "Unset" + case StatusCodeOk: + return "Ok" + case StatusCodeError: + return "Error" + } + return "" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/traces.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/traces.go new file mode 100644 index 00000000000..a4b71e17853 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/traces.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" +) + +// Traces is the top-level struct that is propagated through the traces pipeline. +// Use NewTraces to create new instance, zero-initialized instance is not valid for use. +type Traces internal.Traces + +func newTraces(orig *otlpcollectortrace.ExportTraceServiceRequest) Traces { + state := internal.StateMutable + return Traces(internal.NewTraces(orig, &state)) +} + +func (ms Traces) getOrig() *otlpcollectortrace.ExportTraceServiceRequest { + return internal.GetOrigTraces(internal.Traces(ms)) +} + +func (ms Traces) getState() *internal.State { + return internal.GetTracesState(internal.Traces(ms)) +} + +// NewTraces creates a new Traces struct. +func NewTraces() Traces { + return newTraces(&otlpcollectortrace.ExportTraceServiceRequest{}) +} + +// IsReadOnly returns true if this Traces instance is read-only. +func (ms Traces) IsReadOnly() bool { + return *ms.getState() == internal.StateReadOnly +} + +// CopyTo copies the Traces instance overriding the destination. +func (ms Traces) CopyTo(dest Traces) { + ms.ResourceSpans().CopyTo(dest.ResourceSpans()) +} + +// SpanCount calculates the total number of spans. 
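// Editor's sketch (illustration only, not part of the vendored files): the usual
// top-down way to build a Traces value, with SpanCount and the read-only state
// from traces.go here. The service name is a made-up example.
//
//	func buildTraces() ptrace.Traces {
//		td := ptrace.NewTraces()
//		rs := td.ResourceSpans().AppendEmpty()
//		rs.Resource().Attributes().PutStr("service.name", "checkout") // example
//		span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty()
//		span.SetName("charge-card")
//		_ = td.SpanCount() // 1: one span across all resource/scope groups
//		td.MarkReadOnly()  // any further setter would panic via AssertMutable
//		return td
//	}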
+func (ms Traces) SpanCount() int {
+ spanCount := 0
+ rss := ms.ResourceSpans()
+ for i := 0; i < rss.Len(); i++ {
+ rs := rss.At(i)
+ ilss := rs.ScopeSpans()
+ for j := 0; j < ilss.Len(); j++ {
+ spanCount += ilss.At(j).Spans().Len()
+ }
+ }
+ return spanCount
+}
+
+// ResourceSpans returns the ResourceSpansSlice associated with this Traces.
+func (ms Traces) ResourceSpans() ResourceSpansSlice {
+ return newResourceSpansSlice(&ms.getOrig().ResourceSpans, internal.GetTracesState(internal.Traces(ms)))
+}
+
+// MarkReadOnly marks the Traces as shared so that no further modifications can be done on it.
+func (ms Traces) MarkReadOnly() {
+ internal.SetTracesState(internal.Traces(ms), internal.StateReadOnly)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pipeline/LICENSE b/vendor/go.opentelemetry.io/collector/pipeline/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pipeline/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/pipeline/Makefile b/vendor/go.opentelemetry.io/collector/pipeline/Makefile new file mode 100644 index 00000000000..39734bfaebb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go b/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go new file mode 100644 index 00000000000..dd7c6ec623c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package globalsignal // import "go.opentelemetry.io/collector/pipeline/internal/globalsignal" + +import ( + "errors" + "fmt" + "regexp" +) + +// Signal represents the signals supported by the collector. +type Signal struct { + name string +} + +// String returns the string representation of the signal. +func (s Signal) String() string { + return s.name +} + +// MarshalText marshals the Signal. +func (s Signal) MarshalText() (text []byte, err error) { + return []byte(s.name), nil +} + +// signalRegex is used to validate the signal. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +var signalRegex = regexp.MustCompile(`^[a-z]{1,62}$`) + +// NewSignal creates a Signal. It returns an error if the Signal is invalid. +// A Signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +func NewSignal(signal string) (Signal, error) { + if len(signal) == 0 { + return Signal{}, errors.New("signal must not be empty") + } + if !signalRegex.MatchString(signal) { + return Signal{}, fmt.Errorf("invalid character(s) in type %q", signal) + } + return Signal{name: signal}, nil +} + +// MustNewSignal creates a Signal. It panics if the Signal is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. 
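// Editor's sketch (illustration only, not part of the vendored files): NewSignal
// accepts only 1 to 62 lowercase ASCII letters, as the regexp above enforces;
// anything else returns an error. Note globalsignal is an internal package, so
// external code reaches these values through the pipeline package (signal.go
// later in this patch). Assumes import "fmt"; usable as-is inside this package.
//
//	func signalExamples() {
//		s, err := NewSignal("profiles") // ok: lowercase ASCII, within 62 chars
//		fmt.Println(s, err)             // "profiles" <nil>
//		_, err = NewSignal("Traces!") // rejected: uppercase letter and '!'
//		fmt.Println(err != nil)       // true
//	}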
+func MustNewSignal(signal string) Signal { + s, err := NewSignal(signal) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go b/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go new file mode 100644 index 00000000000..aa2d3d0d0ad --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pipeline // import "go.opentelemetry.io/collector/pipeline" +import ( + "errors" + "fmt" + "regexp" + "strings" + + "go.opentelemetry.io/collector/pipeline/internal/globalsignal" +) + +// typeAndNameSeparator is the separator that is used between type and name in type/name composite keys. +const typeAndNameSeparator = "/" + +// ID represents the identity for a pipeline. It combines two values: +// * signal - the Signal of the pipeline. +// * name - the name of that pipeline. +type ID struct { + signal Signal `mapstructure:"-"` + name string `mapstructure:"-"` +} + +// NewID returns a new ID with the given Signal and empty name. +func NewID(signal Signal) ID { + return ID{signal: signal} +} + +// MustNewID builds a Signal and returns a new ID with the given Signal and empty name. +// It panics if the Signal is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +func MustNewID(signal string) ID { + return ID{signal: globalsignal.MustNewSignal(signal)} +} + +// NewIDWithName returns a new ID with the given Signal and name. +func NewIDWithName(signal Signal, name string) ID { + return ID{signal: signal, name: name} +} + +// MustNewIDWithName builds a Signal and returns a new ID with the given Signal and name. +// It panics if the Signal is invalid or name is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +// A name must consist of 1 to 1024 unicode characters excluding whitespace, control characters, and symbols. +func MustNewIDWithName(signal string, name string) ID { + id := ID{signal: globalsignal.MustNewSignal(signal)} + err := validateName(name) + if err != nil { + panic(err) + } + id.name = name + return id +} + +// Signal returns the Signal of the ID. +func (i ID) Signal() Signal { + return i.signal +} + +// Name returns the name of the ID. +func (i ID) Name() string { + return i.name +} + +// MarshalText implements the encoding.TextMarshaler interface. +// This marshals the Signal and name as one string in the config. +func (i ID) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (i *ID) UnmarshalText(text []byte) error { + idStr := string(text) + items := strings.SplitN(idStr, typeAndNameSeparator, 2) + var signalStr, nameStr string + if len(items) >= 1 { + signalStr = strings.TrimSpace(items[0]) + } + + if len(items) == 1 && signalStr == "" { + return errors.New("id must not be empty") + } + + if signalStr == "" { + return fmt.Errorf("in %q id: the part before %s should not be empty", idStr, typeAndNameSeparator) + } + + if len(items) > 1 { + // "name" part is present. 
+ nameStr = strings.TrimSpace(items[1]) + if nameStr == "" { + return fmt.Errorf("in %q id: the part after %s should not be empty", idStr, typeAndNameSeparator) + } + if err := validateName(nameStr); err != nil { + return fmt.Errorf("in %q id: %w", nameStr, err) + } + } + + var err error + if i.signal, err = globalsignal.NewSignal(signalStr); err != nil { + return fmt.Errorf("in %q id: %w", idStr, err) + } + i.name = nameStr + + return nil +} + +// String returns the ID string representation as "signal[/name]" format. +func (i ID) String() string { + if i.name == "" { + return i.signal.String() + } + + return i.signal.String() + typeAndNameSeparator + i.name +} + +// nameRegexp is used to validate the name of an ID. A name can consist of +// 1 to 1024 unicode characters excluding whitespace, control characters, and +// symbols. +var nameRegexp = regexp.MustCompile(`^[^\pZ\pC\pS]+$`) + +func validateName(nameStr string) error { + if len(nameStr) > 1024 { + return fmt.Errorf("name %q is longer than 1024 characters (%d characters)", nameStr, len(nameStr)) + } + if !nameRegexp.MatchString(nameStr) { + return fmt.Errorf("invalid character(s) in name %q", nameStr) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pipeline/signal.go b/vendor/go.opentelemetry.io/collector/pipeline/signal.go new file mode 100644 index 00000000000..77376c999ad --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/signal.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pipeline // import "go.opentelemetry.io/collector/pipeline" + +import ( + "errors" + + "go.opentelemetry.io/collector/pipeline/internal/globalsignal" +) + +// Signal represents the signals supported by the collector. We currently support +// collecting metrics, traces and logs, this can expand in the future. +type Signal = globalsignal.Signal + +var ErrSignalNotSupported = errors.New("telemetry type is not supported") + +var ( + SignalTraces = globalsignal.MustNewSignal("traces") + SignalMetrics = globalsignal.MustNewSignal("metrics") + SignalLogs = globalsignal.MustNewSignal("logs") +) diff --git a/vendor/go.opentelemetry.io/collector/processor/LICENSE b/vendor/go.opentelemetry.io/collector/processor/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/processor/Makefile b/vendor/go.opentelemetry.io/collector/processor/Makefile
new file mode 100644
index 00000000000..39734bfaebb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/Makefile
@@ -0,0 +1 @@
+include ../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/processor/README.md b/vendor/go.opentelemetry.io/collector/processor/README.md
new file mode 100644
index 00000000000..f52dbc10374
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/README.md
@@ -0,0 +1,109 @@
+# General Information
+
+Processors are used at various stages of a pipeline. Generally, a processor
+pre-processes data before it is exported (e.g. modify attributes or sample) or
+helps ensure that data makes it through a pipeline successfully (e.g.
+batch/retry).
+
+Some important aspects of pipelines and processors to be aware of:
+- [Recommended Processors](#recommended-processors)
+- [Data Ownership](#data-ownership)
+- [Exclusive Ownership](#exclusive-ownership)
+- [Shared Ownership](#shared-ownership)
+- [Ordering Processors](#ordering-processors)
+
+Supported processors (sorted alphabetically):
+- [Batch Processor](batchprocessor/README.md)
+- [Memory Limiter Processor](memorylimiterprocessor/README.md)
+
+The [contrib repository](https://github.com/open-telemetry/opentelemetry-collector-contrib)
+has more processors that can be added to a custom build of the Collector.
+
+## Recommended Processors
+
+By default, no processors are enabled. Depending on the data source, it may be
+recommended that multiple processors be enabled. Processors must be enabled
+for every data source, and not all processors support all data sources.
+In addition, it is important to note that the order of processors matters. The
+order in each section below is the best practice. Refer to the individual
+processor documentation for more information.
+
+1. [memory_limiter](memorylimiterprocessor/README.md)
+2. Any sampling or initial filtering processors
+3. Any processor relying on sending source from `Context` (e.g. `k8sattributes`)
+4. [batch](batchprocessor/README.md)
+5. Any other processors
+
+## Data Ownership
+
+The ownership of the `pdata.Traces`, `pdata.Metrics` and `pdata.Logs` data in a pipeline
+is passed along as the data travels through the pipeline. The data is created by the receiver,
+and ownership is then passed to the first processor when its `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs`
+function is called.
+
+Note: the receiver may be attached to multiple pipelines, in which case the same data
+will be passed to all attached pipelines via a data fan-out connector.
+
+From a data ownership perspective, pipelines can work in two modes:
+* Exclusive data ownership
+* Shared data ownership
+
+The mode is determined during startup based on the data modification intent reported by the
+processors. The intent is reported by each processor via the `MutatesData` field of
+the struct returned by its `Capabilities` function. If any processor in the pipeline
+declares an intent to modify the data, then that pipeline will work in exclusive ownership
+mode. In addition, any other pipeline that receives data from a receiver that is attached
+to a pipeline in exclusive ownership mode will also operate in exclusive ownership
+mode.
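To make the intent reporting concrete, here is a minimal sketch. It is illustrative only and not part of the vendored files; the passthroughProcessor name and its wiring are hypothetical. It shows a Traces processor declaring, via its Capabilities function, that it never mutates the data it receives, which keeps its pipelines eligible for shared ownership:

    package sketch

    import (
    	"context"

    	"go.opentelemetry.io/collector/component"
    	"go.opentelemetry.io/collector/consumer"
    	"go.opentelemetry.io/collector/pdata/ptrace"
    )

    // passthroughProcessor forwards traces unchanged.
    type passthroughProcessor struct {
    	next consumer.Traces // next consumer in the pipeline
    }

    // Capabilities reports the data-modification intent described above;
    // a mutating processor would return MutatesData: true instead.
    func (p *passthroughProcessor) Capabilities() consumer.Capabilities {
    	return consumer.Capabilities{MutatesData: false}
    }

    // ConsumeTraces hands ownership of td to the next consumer; after this
    // call returns, the processor must not read or write td again.
    func (p *passthroughProcessor) ConsumeTraces(ctx context.Context, td ptrace.Traces) error {
    	return p.next.ConsumeTraces(ctx, td)
    }

    func (p *passthroughProcessor) Start(context.Context, component.Host) error { return nil }
    func (p *passthroughProcessor) Shutdown(context.Context) error              { return nil }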
+
+### Exclusive Ownership
+
+In exclusive ownership mode the data is owned exclusively by a particular processor at a
+given moment in time, and the processor is free to modify the data it owns.
+
+Exclusive ownership mode is only applicable to pipelines that receive data from the
+same receiver. If a pipeline is marked to be in exclusive ownership mode, then any data
+received from a shared receiver will be cloned at the fan-out connector before being passed
+further to each pipeline. This ensures that each pipeline has its own exclusive copy of
+the data, and the data can be safely modified in the pipeline.
+
+The exclusive ownership of data allows processors to freely modify the data while
+they own it (e.g. see `attributesprocessor`). The duration of ownership of the data
+by a processor is from the beginning of the `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs`
+call until the processor calls the next processor's `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs`
+function, which passes the ownership to the next processor. After that, the processor
+must no longer read or write the data, since it may be concurrently modified by the
+new owner.
+
+Exclusive ownership mode makes it easy to implement processors that need to modify
+the data: they simply declare such intent.
+
+### Shared Ownership
+
+In shared ownership mode no particular processor owns the data, and no processor is
+allowed to modify the shared data.
+
+In this mode no cloning is performed at the fan-out connector of receivers that
+are attached to multiple pipelines. In this case all such pipelines will see
+the same single shared copy of the data. Processors in pipelines operating in shared
+ownership mode are prohibited from modifying the original data that they receive
+via the `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs` call. Processors may only read
+the data but must not modify it.
+
+If a processor needs to modify the data while performing the processing but
+does not want to incur the cost of data cloning that exclusive mode brings, then
+the processor can declare that it does not modify the data and use some
+other technique that ensures the original data is not modified. For example,
+the processor can implement a copy-on-write approach for individual sub-parts of the
+`pdata.Traces`/`pdata.Metrics`/`pdata.Logs` argument. Any approach that does not
+mutate the original `pdata.Traces`/`pdata.Metrics`/`pdata.Logs` is allowed.
+
+If a processor uses such a technique, it should declare that it does not intend
+to modify the original data by setting `MutatesData=false` in its capabilities
+to avoid marking the pipeline for exclusive ownership and to avoid the cost of
+data cloning described in the Exclusive Ownership section.
+
+## Ordering Processors
+
+The order in which processors are specified in a pipeline is important, as this is the
+order in which each processor is applied.
diff --git a/vendor/go.opentelemetry.io/collector/processor/processor.go b/vendor/go.opentelemetry.io/collector/processor/processor.go
new file mode 100644
index 00000000000..8ecd4d497c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/processor.go
@@ -0,0 +1,205 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package processor // import "go.opentelemetry.io/collector/processor"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/pipeline"
+)
+
+// Traces is a processor that can consume traces.
+type Traces interface {
+	component.Component
+	consumer.Traces
+}
+
+// Metrics is a processor that can consume metrics.
+type Metrics interface {
+	component.Component
+	consumer.Metrics
+}
+
+// Logs is a processor that can consume logs.
+type Logs interface {
+	component.Component
+	consumer.Logs
+}
+
+// Settings is passed to Create* functions in Factory.
+type Settings struct {
+	// ID returns the ID of the component that will be created.
+	ID component.ID
+
+	component.TelemetrySettings
+
+	// BuildInfo can be used by components for informational purposes.
+	BuildInfo component.BuildInfo
+}
+
+// Factory is the factory interface for processors.
+//
+// This interface cannot be directly implemented. Implementations must
+// use NewFactory to implement it.
+type Factory interface {
+	component.Factory
+
+	// CreateTraces creates a Traces processor based on this config.
+	// If the processor type does not support traces,
+	// this function returns the error [pipeline.ErrSignalNotSupported].
+	// Implementers can assume `next` is never nil.
+	CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error)
+
+	// TracesStability gets the stability level of the Traces processor.
+	TracesStability() component.StabilityLevel
+
+	// CreateMetrics creates a Metrics processor based on this config.
+	// If the processor type does not support metrics,
+	// this function returns the error [pipeline.ErrSignalNotSupported].
+	// Implementers can assume `next` is never nil.
+	CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error)
+
+	// MetricsStability gets the stability level of the Metrics processor.
+	MetricsStability() component.StabilityLevel
+
+	// CreateLogs creates a Logs processor based on the config.
+	// If the processor type does not support logs,
+	// this function returns the error [pipeline.ErrSignalNotSupported].
+	// Implementers can assume `next` is never nil.
+	CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error)
+
+	// LogsStability gets the stability level of the Logs processor.
+	LogsStability() component.StabilityLevel
+
+	unexportedFactoryFunc()
+}
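For orientation, a sketch of how a component author would typically obtain a Factory through the helpers defined below. It is illustrative only and not part of the vendored files: the examplep type name, Config struct, and createDefaultConfig/createTraces helpers are hypothetical, and passthroughProcessor refers to the earlier sketch:

    package sketch

    import (
    	"context"

    	"go.opentelemetry.io/collector/component"
    	"go.opentelemetry.io/collector/consumer"
    	"go.opentelemetry.io/collector/processor"
    )

    // Config is a hypothetical processor configuration struct.
    type Config struct{}

    // createDefaultConfig satisfies component.CreateDefaultConfigFunc.
    func createDefaultConfig() component.Config {
    	return &Config{}
    }

    // createTraces satisfies processor.CreateTracesFunc.
    func createTraces(_ context.Context, _ processor.Settings, _ component.Config, next consumer.Traces) (processor.Traces, error) {
    	return &passthroughProcessor{next: next}, nil
    }

    // NewFactory registers traces support only; calling CreateMetrics or
    // CreateLogs on the result returns pipeline.ErrSignalNotSupported.
    func NewFactory() processor.Factory {
    	return processor.NewFactory(
    		component.MustNewType("examplep"),
    		createDefaultConfig,
    		processor.WithTraces(createTraces, component.StabilityLevelDevelopment),
    	)
    }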
+
+// FactoryOption applies changes to Options.
+type FactoryOption interface {
+	// applyOption applies the option.
+	applyOption(o *factory)
+}
+
+var _ FactoryOption = (*factoryOptionFunc)(nil)
+
+// factoryOptionFunc is a FactoryOption created through a function.
+type factoryOptionFunc func(*factory)
+
+func (f factoryOptionFunc) applyOption(o *factory) {
+	f(o)
+}
+
+type factory struct {
+	cfgType component.Type
+	component.CreateDefaultConfigFunc
+	CreateTracesFunc
+	tracesStabilityLevel component.StabilityLevel
+	CreateMetricsFunc
+	metricsStabilityLevel component.StabilityLevel
+	CreateLogsFunc
+	logsStabilityLevel component.StabilityLevel
+}
+
+func (f *factory) Type() component.Type {
+	return f.cfgType
+}
+
+func (f *factory) unexportedFactoryFunc() {}
+
+func (f *factory) TracesStability() component.StabilityLevel {
+	return f.tracesStabilityLevel
+}
+
+func (f *factory) MetricsStability() component.StabilityLevel {
+	return f.metricsStabilityLevel
+}
+
+func (f *factory) LogsStability() component.StabilityLevel {
+	return f.logsStabilityLevel
+}
+
+// CreateTracesFunc is the equivalent of Factory.CreateTraces().
+type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error)
+
+// CreateTraces implements Factory.CreateTraces.
+func (f CreateTracesFunc) CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) {
+	if f == nil {
+		return nil, pipeline.ErrSignalNotSupported
+	}
+	return f(ctx, set, cfg, next)
+}
+
+// CreateMetricsFunc is the equivalent of Factory.CreateMetrics().
+type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error)
+
+// CreateMetrics implements Factory.CreateMetrics.
+func (f CreateMetricsFunc) CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) {
+	if f == nil {
+		return nil, pipeline.ErrSignalNotSupported
+	}
+	return f(ctx, set, cfg, next)
+}
+
+// CreateLogsFunc is the equivalent of Factory.CreateLogs().
+type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error)
+
+// CreateLogs implements Factory.CreateLogs.
+func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) {
+	if f == nil {
+		return nil, pipeline.ErrSignalNotSupported
+	}
+	return f(ctx, set, cfg, next)
+}
+
+// WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level.
+func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factory) {
+		o.tracesStabilityLevel = sl
+		o.CreateTracesFunc = createTraces
+	})
+}
+
+// WithMetrics overrides the default "error not supported" implementation for CreateMetrics and the default "undefined" stability level.
+func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factory) {
+		o.metricsStabilityLevel = sl
+		o.CreateMetricsFunc = createMetrics
+	})
+}
+
+// WithLogs overrides the default "error not supported" implementation for CreateLogs and the default "undefined" stability level.
+func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factory) {
+		o.logsStabilityLevel = sl
+		o.CreateLogsFunc = createLogs
+	})
+}
+
+// NewFactory returns a Factory.
+func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory {
+	f := &factory{
+		cfgType:                 cfgType,
+		CreateDefaultConfigFunc: createDefaultConfig,
+	}
+	for _, opt := range options {
+		opt.applyOption(f)
+	}
+	return f
+}
+
+// MakeFactoryMap takes a list of factories and returns a map with factory types as keys.
+// It returns a non-nil error when there are factories with duplicate types.
+func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { + fMap := map[component.Type]Factory{} + for _, f := range factories { + if _, ok := fMap[f.Type()]; ok { + return fMap, fmt.Errorf("duplicate processor factory %q", f.Type()) + } + fMap[f.Type()] = f + } + return fMap, nil +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go index cf7e9db36bf..5eb8e43c57b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go @@ -5,7 +5,7 @@ package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net // Version is the current release version of the httptrace instrumentation. func Version() string { - return "0.58.0" + return "0.59.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index e555a475f13..3ea05d01995 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -21,15 +22,16 @@ type middleware struct { operation string server string - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + metricAttributesFn func(*http.Request) []attribute.KeyValue semconv semconv.HTTPServer } @@ -79,6 +81,7 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) + h.metricAttributesFn = c.MetricAttributesFn } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -189,14 +192,16 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). 
 	elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
 
+	metricAttributes := semconv.MetricAttributes{
+		Req:                  r,
+		StatusCode:           statusCode,
+		AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...),
+	}
+
 	h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
-		ServerName:   h.server,
-		ResponseSize: bytesWritten,
-		MetricAttributes: semconv.MetricAttributes{
-			Req:                  r,
-			StatusCode:           statusCode,
-			AdditionalAttributes: labeler.Get(),
-		},
+		ServerName:       h.server,
+		ResponseSize:     bytesWritten,
+		MetricAttributes: metricAttributes,
 		MetricData: semconv.MetricData{
 			RequestSize: bw.BytesRead(),
 			ElapsedTime: elapsedTime,
@@ -204,6 +209,14 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
 	})
 }
 
+func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
+	var attributeForRequest []attribute.KeyValue
+	if h.metricAttributesFn != nil {
+		attributeForRequest = h.metricAttributesFn(r)
+	}
+	return attributeForRequest
+}
+
 // WithRouteTag annotates spans and metrics with the provided route name
 // with HTTP route attribute.
 func WithRouteTag(route string, h http.Handler) http.Handler {
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index 3b036f8a37b..eaf4c379674 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/env.go.tmpl
+
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
@@ -16,6 +19,10 @@ import (
 	"go.opentelemetry.io/otel/metric"
 )
 
+// OTelSemConvStabilityOptIn is an environment variable that can be set to "old"
+// or "http/dup" to opt into the new HTTP semantic conventions.
+const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN"
+
 type ResponseTelemetry struct {
 	StatusCode int
 	ReadBytes  int64
@@ -31,6 +38,11 @@ type HTTPServer struct {
 	requestBytesCounter  metric.Int64Counter
 	responseBytesCounter metric.Int64Counter
 	serverLatencyMeasure metric.Float64Histogram
+
+	// New metrics
+	requestBodySizeHistogram  metric.Int64Histogram
+	responseBodySizeHistogram metric.Int64Histogram
+	requestDurationHistogram  metric.Float64Histogram
 }
 
 // RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -103,38 +115,56 @@ type MetricData struct {
 	ElapsedTime float64
 }
 
-var metricAddOptionPool = &sync.Pool{
-	New: func() interface{} {
-		return &[]metric.AddOption{}
-	},
-}
+var (
+	metricAddOptionPool = &sync.Pool{
+		New: func() interface{} {
+			return &[]metric.AddOption{}
+		},
+	}
 
-func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
-	if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
-		// This will happen if an HTTPServer{} is used instead of NewHTTPServer.
- return + metricRecordOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.RecordOption{} + }, } +) - attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) - *addOpts = append(*addOpts, o) - s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) - s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) - *addOpts = (*addOpts)[:0] - metricAddOptionPool.Put(addOpts) +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + } - // TODO: Duplicate Metrics + if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) 
+ s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) + } } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) duplicate := env == "http/dup" server := HTTPServer{ duplicate: duplicate, } server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + if duplicate { + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + } return server } @@ -145,14 +175,23 @@ type HTTPClient struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram + + // new metrics + requestBodySize metric.Int64Histogram + requestDuration metric.Float64Histogram } func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := env == "http/dup" client := HTTPClient{ - duplicate: env == "http/dup", + duplicate: duplicate, } client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + if duplicate { + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + } + return client } @@ -204,34 +243,48 @@ func (o MetricOpts) AddOptions() metric.AddOption { return o.addOptions } -func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { +func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - // TODO: Duplicate Metrics set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - return MetricOpts{ + opts["old"] = MetricOpts{ measurement: set, addOptions: set, } + + if c.duplicate { + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + } + + return opts } -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { if s.requestBytesCounter == nil || s.latencyMeasure == nil { // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } - s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) - // TODO: Duplicate Metrics + if s.duplicate { + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption()) + } } -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { if s.responseBytesCounter == nil { // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
return } - s.responseBytesCounter.Add(ctx, responseData, opts) - // TODO: Duplicate Metrics + s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go new file mode 100644 index 00000000000..32630864bf2 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +// Generate semconv package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={}" --out=bench_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={}" --out=env.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={}" --out=env_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={}" --out=util.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={}" --out=util_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={}" --out=v1.20.0.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index dc9ec7bc39e..8c3c6275133 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconv/httpconv.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -7,10 +10,13 @@ import ( "fmt" "net/http" "reflect" + "slices" "strconv" "strings" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) @@ -199,6 +205,86 @@ func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } +func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), + ) + handleErr(err) + + responseBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerResponseBodySizeName, + metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), + ) + handleErr(err) + requestDurationHistogram, err := meter.Float64Histogram( + semconvNew.HTTPServerRequestDurationName, + metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + ) + handleErr(err) + + return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram +} + +func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconvNew.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconvNew.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. @@ -343,6 +429,91 @@ func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute. 
return semconvNew.HTTPRequestMethodGet, orig } +func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySize, err := meter.Int64Histogram( + semconvNew.HTTPClientRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription), + ) + handleErr(err) + + requestDuration, err := meter.Float64Histogram( + semconvNew.HTTPClientRequestDurationName, + metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), + ) + handleErr(err) + + return requestBodySize, requestDuration +} + +func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconvNew.ServerAddress(requestHost), + n.scheme(req.TLS != nil), + ) + + if port > 0 { + attributes = append(attributes, semconvNew.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index 93e8d0f94c1..558efd0594b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconv/util.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -96,3 +99,13 @@ func handleErr(err error) { otel.Handle(err) } } + +func standardizeHTTPMethod(method string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return method +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c042249dd72..57d1507b620 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/v120.0.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -8,7 +11,6 @@ import ( "io" "net/http" "slices" - "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -144,7 +146,7 @@ func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -214,7 +216,7 @@ func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), semconv.NetPeerName(requestHost), ) @@ -262,13 +264,3 @@ func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } - -func standardizeHTTPMethodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 39681ad4b09..44b86ad8609 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -153,7 +153,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) + t.semconv.RecordResponseSize(ctx, n, metricOpts) } // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 353e43b91fd..386f09e1b7a 100644 --- 
a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.58.0" + return "0.59.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index a30988f25d0..599d59cd130 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,21 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. (#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + ## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 ### Added @@ -37,9 +52,6 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) - Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) - - - ## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 ### Added @@ -3185,7 +3197,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index efec278905b..d9a19207625 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -1,6 +1,6 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index ffa9b61258a..4ebef4f9ddf 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -130,6 +130,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 2171bee3c84..8409b5f8f95 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -294,7 +294,7 @@ func (c *client) MarshalLog() interface{} { Type string Endpoint string }{ - Type: "otlphttpgrpc", + Type: "otlptracegrpc", Endpoint: c.endpoint, } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 8ea156a0985..f156ee66720 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ 
b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.33.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 0a29a2f13d8..4f80c898a1d 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -14,12 +14,6 @@ "matchDepTypes": ["indirect"], "enabled": true }, - { - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false - }, { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index ba7db488950..6b403851073 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.33.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index fb7d12673eb..eb22002d824 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.33.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 9f878cd1fe7..ce4fe59b0e4 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.33.0 + version: v1.34.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -23,11 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.55.0 + version: v0.56.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.9.0 + version: v0.10.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index d7099c35bc4..b342a0a9401 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -311,7 +311,8 @@ type ResourceSpans struct { // A list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. 
It does not apply
// to the data in the "scope_spans" field which have their own schema_url field.
@@ -384,7 +385,8 @@ type ScopeSpans struct {
// A list of Spans that originate from an instrumentation scope.
Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the span data
- // is recorded in. To learn more about Schema URL see
+ // is recorded in. Notably, the last part of the URL path is the version number of the
+ // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all spans and span events in the "spans" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml
new file mode 100644
index 00000000000..2346df13517
--- /dev/null
+++ b/vendor/go.uber.org/zap/.golangci.yml
@@ -0,0 +1,77 @@
+output:
+  # Make output more digestible with quickfix in vim/emacs/etc.
+  sort-results: true
+  print-issued-lines: false
+
+linters:
+  # We'll track the golangci-lint default linters manually
+  # instead of letting them change without our control.
+  disable-all: true
+  enable:
+    # golangci-lint defaults:
+    - errcheck
+    - gosimple
+    - govet
+    - ineffassign
+    - staticcheck
+    - unused
+
+    # Our own extras:
+    - gofumpt
+    - nolintlint # lints nolint directives
+    - revive
+
+linters-settings:
+  govet:
+    # These govet checks are disabled by default, but they're useful.
+    enable:
+      - nilness
+      - reflectvaluecompare
+      - sortslice
+      - unusedwrite
+
+  errcheck:
+    exclude-functions:
+      # These methods can not fail.
+      # They operate on an in-memory buffer.
+      - (*go.uber.org/zap/buffer.Buffer).Write
+      - (*go.uber.org/zap/buffer.Buffer).WriteByte
+      - (*go.uber.org/zap/buffer.Buffer).WriteString
+
+      - (*go.uber.org/zap/zapio.Writer).Close
+      - (*go.uber.org/zap/zapio.Writer).Sync
+      - (*go.uber.org/zap/zapio.Writer).Write
+      # Write to zapio.Writer cannot fail,
+      # so io.WriteString on it cannot fail.
+      - io.WriteString(*go.uber.org/zap/zapio.Writer)
+
+      # Writing a plain string to a fmt.State cannot fail.
+      - io.WriteString(fmt.State)
+
+issues:
+  # Print all issues reported by all linters.
+  max-issues-per-linter: 0
+  max-same-issues: 0
+
+  # Don't ignore some of the issues that golangci-lint considers okay.
+  # This includes documenting all exported entities.
+  exclude-use-default: false
+
+  exclude-rules:
+    # Don't warn on unused parameters.
+    # Parameter names are useful; replacing them with '_' is undesirable.
+    - linters: [revive]
+      text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
+
+    # staticcheck already has smarter checks for empty blocks.
+    # revive's empty-block linter has false positives.
+    # For example, as of writing this, the following is not allowed.
+    # for foo() { }
+    - linters: [revive]
+      text: 'empty-block: this block is empty, you can remove it'
+
+    # Ignore logger.Sync() errcheck failures in example_test.go
+    # since those are intended to be uncomplicated examples.
+ - linters: [errcheck] + path: example_test.go + text: 'Error return value of `logger.Sync` is not checked' diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d660b..4fea3027af3 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +
+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).

1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
index 1793b08c89a..6d6cd5f4d70 100644
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -1,7 +1,107 @@
 # Changelog
 All notable changes to this project will be documented in this file.
-This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 1.27.0 (20 Feb 2024)
+Enhancements:
+* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
+* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
+* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
+* [#1416][]: Add `WithPanicHook` option for testing panic logs.
+
+Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
+
+[#1378]: https://github.com/uber-go/zap/pull/1378
+[#1399]: https://github.com/uber-go/zap/pull/1399
+[#1406]: https://github.com/uber-go/zap/pull/1406
+[#1416]: https://github.com/uber-go/zap/pull/1416
+
+## 1.26.0 (14 Sep 2023)
+Enhancements:
+* [#1297][]: Add Dict as a Field.
+* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
+context.
+* [#1350][]: String encoding is much (~50%) faster now.
+
+Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
+
+[#1297]: https://github.com/uber-go/zap/pull/1297
+[#1319]: https://github.com/uber-go/zap/pull/1319
+[#1350]: https://github.com/uber-go/zap/pull/1350
+
+## 1.25.0 (1 Aug 2023)
+
+This release contains several improvements including performance, API additions,
+and two new experimental packages whose APIs are unstable and may change in the
+future.
+
+Enhancements:
+* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
+* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
+* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
+`Str` and `Strs` for constructing String-like zap.Fields.
+* [#1310][]: Reduce stack size on `Any`.
+
+Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
+to this release.
+
+[#1246]: https://github.com/uber-go/zap/pull/1246
+[#1273]: https://github.com/uber-go/zap/pull/1273
+[#1281]: https://github.com/uber-go/zap/pull/1281
+[#1310]: https://github.com/uber-go/zap/pull/1310
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+  current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+  `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+  that implement `String() string`.
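As an editorial aside on the `WithLazy` entries above: the method defers field encoding until a log entry is actually written. A minimal sketch, not part of this patch, with an assumed field name:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// WithLazy defers encoding of the fields until the logger actually
	// writes an entry, so a child logger that never logs pays almost
	// nothing for its captured context.
	child := logger.WithLazy(zap.String("request_id", "abc-123"))
	child.Info("handling request")
}
```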
+ +[#1147]: https://github.com/uber-go/zap/pull/1147 +[#1155]: https://github.com/uber-go/zap/pull/1155 + +## 1.22.0 (8 Aug 2022) + +Enhancements: +* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log + arrays of objects. With these two constructors, you don't need to implement + `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement + `zapcore.ObjectMarshaler`. +* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing + `SugaredLogger` with the provided options applied. +* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level. + These functions provide a string joining behavior similar to `fmt.Println`. +* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the + logger for `Fatal`-level log entries. This defaults to exiting the program. +* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or + `NewDevelopment` to panic if the system was unable to build the logger. +* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for + a statement dynamically. + +Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun +for their contributions to this release. + +[#1071]: https://github.com/uber-go/zap/pull/1071 +[#1079]: https://github.com/uber-go/zap/pull/1079 +[#1080]: https://github.com/uber-go/zap/pull/1080 +[#1088]: https://github.com/uber-go/zap/pull/1088 +[#1108]: https://github.com/uber-go/zap/pull/1108 +[#1118]: https://github.com/uber-go/zap/pull/1118 ## 1.21.0 (7 Feb 2022) @@ -123,6 +223,16 @@ Enhancements: Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 + ## 1.16.0 (1 Sep 2020) Bugfixes: @@ -144,6 +254,17 @@ Enhancements: Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 + ## 1.15.0 (23 Apr 2020) Bugfixes: @@ -160,6 +281,11 @@ Enhancements: Thanks to @danielbprice for their contributions to this release. +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 + ## 1.14.1 (14 Mar 2020) Bugfixes: @@ -172,6 +298,10 @@ Bugfixes: Thanks to @YashishDua for their contributions to this release. 
+[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 + ## 1.14.0 (20 Feb 2020) Enhancements: @@ -182,6 +312,11 @@ Enhancements: Thanks to @caibirdme for their contributions to this release. +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 + ## 1.13.0 (13 Nov 2019) Enhancements: @@ -190,11 +325,15 @@ Enhancements: Thanks to @jbizzle for their contributions to this release. +[#758]: https://github.com/uber-go/zap/pull/758 + ## 1.12.0 (29 Oct 2019) Enhancements: * [#751][]: Migrate to Go modules. +[#751]: https://github.com/uber-go/zap/pull/751 + ## 1.11.0 (21 Oct 2019) Enhancements: @@ -203,6 +342,9 @@ Enhancements: Thanks to @juicemia, @uhthomas for their contributions to this release. +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 + ## 1.10.0 (29 Apr 2019) Bugfixes: @@ -220,13 +362,21 @@ Enhancements: Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions to this release. -## v1.9.1 (06 Aug 2018) +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 + +## 1.9.1 (06 Aug 2018) Bugfixes: * [#614][]: MapObjectEncoder should not ignore empty slices. -## v1.9.0 (19 Jul 2018) +[#614]: https://github.com/uber-go/zap/pull/614 + +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -235,7 +385,11 @@ Enhancements: Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and @dimroc for their contributions to this release. -## v1.8.0 (13 Apr 2018) +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 + +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -248,19 +402,28 @@ Bugfixes: Thanks to @DiSiqueira and @djui for their contributions to this release. -## v1.7.1 (25 Sep 2017) +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 + +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. -## v1.7.0 (21 Sep 2017) +[#504]: https://github.com/uber-go/zap/pull/504 + +## 1.7.0 (21 Sep 2017) Enhancements: * [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user to specify the level of the logged messages. -## v1.6.0 (30 Aug 2017) +[#487]: https://github.com/uber-go/zap/pull/487 + +## 1.6.0 (30 Aug 2017) Enhancements: @@ -268,7 +431,10 @@ Enhancements: * [#490][]: Add a `ContextMap` method to observer logs for simpler field validation in tests. -## v1.5.0 (22 Jul 2017) +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 + +## 1.5.0 (22 Jul 2017) Enhancements: @@ -281,7 +447,12 @@ Bugfixes: Thanks to @richard-tunein and @pavius for their contributions to this release. 
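To illustrate the `NewStdLogAt` entry above, here is a minimal sketch (not part of the patch) of routing the standard library's logger through zap at a chosen level:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// NewStdLogAt wraps the zap Logger in a *log.Logger whose Print
	// calls are emitted at the given zap level instead of InfoLevel.
	std, err := zap.NewStdLogAt(logger, zap.WarnLevel)
	if err != nil {
		panic(err)
	}
	std.Print("routed through zap at WARN")
}
```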
-## v1.4.1 (08 Jun 2017) +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 + +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -290,7 +461,10 @@ Bugfixes: * [#435][]: Support a variety of case conventions when unmarshaling levels. * [#444][]: Fix a panic in the observer. -## v1.4.0 (12 May 2017) +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 + +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -302,7 +476,11 @@ Enhancements: * [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a variety of operations a bit simpler. -## v1.3.0 (25 Apr 2017) +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 + +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -313,7 +491,10 @@ Enhancements: particularly useful when testing the `SugaredLogger`. * [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. -## v1.2.0 (13 Apr 2017) +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 + +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -322,7 +503,9 @@ Enhancements: * [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements `grpclog.Logger`. -## v1.1.0 (31 Mar 2017) +[#402]: https://github.com/uber-go/zap/pull/402 + +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -339,7 +522,11 @@ Enhancements: Thanks to @moitias for contributing to this release. -## v1.0.0 (14 Mar 2017) +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 + +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -384,7 +571,21 @@ Enhancements: Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their contributions to this release. -## v1.0.0-rc.3 (7 Mar 2017) +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 + +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -405,7 +606,12 @@ Enhancements: Thanks to @ansel1 and @suyash for their contributions to this release. 
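Several of the entries above concern `AtomicLevel`'s text round-tripping (`fmt.Stringer`, `encoding.TextMarshaler`, case-insensitive unmarshaling). A small illustrative sketch, not taken from the patch:

```go
package main

import (
	"fmt"

	"go.uber.org/zap"
)

func main() {
	lvl := zap.NewAtomicLevelAt(zap.InfoLevel)

	// AtomicLevel implements fmt.Stringer and encoding.TextMarshaler.
	fmt.Println(lvl.String()) // info
	text, _ := lvl.MarshalText()
	fmt.Println(string(text)) // info

	// UnmarshalText accepts a variety of case conventions.
	_ = lvl.UnmarshalText([]byte("DEBUG"))
	fmt.Println(lvl.Level()) // debug
}
```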
-## v1.0.0-rc.2 (21 Feb 2017) +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 + +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -442,7 +648,16 @@ Enhancements: Thanks to @skipor and @chapsuk for their contributions to this release. -## v1.0.0-rc.1 (14 Feb 2017) +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 + +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -462,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. -## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no @@ -470,95 +685,3 @@ backward compatibility concerns and all functionality is new. Early zap adopters should pin to the 0.1.x minor version until they're ready to upgrade to the upcoming stable release. - -[#316]: https://github.com/uber-go/zap/pull/316 -[#309]: https://github.com/uber-go/zap/pull/309 -[#317]: https://github.com/uber-go/zap/pull/317 -[#321]: https://github.com/uber-go/zap/pull/321 -[#325]: https://github.com/uber-go/zap/pull/325 -[#333]: https://github.com/uber-go/zap/pull/333 -[#326]: https://github.com/uber-go/zap/pull/326 -[#300]: https://github.com/uber-go/zap/pull/300 -[#339]: https://github.com/uber-go/zap/pull/339 -[#307]: https://github.com/uber-go/zap/pull/307 -[#353]: https://github.com/uber-go/zap/pull/353 -[#311]: https://github.com/uber-go/zap/pull/311 -[#366]: https://github.com/uber-go/zap/pull/366 -[#364]: https://github.com/uber-go/zap/pull/364 -[#371]: https://github.com/uber-go/zap/pull/371 -[#362]: https://github.com/uber-go/zap/pull/362 -[#369]: https://github.com/uber-go/zap/pull/369 -[#347]: https://github.com/uber-go/zap/pull/347 -[#373]: https://github.com/uber-go/zap/pull/373 -[#348]: https://github.com/uber-go/zap/pull/348 -[#327]: https://github.com/uber-go/zap/pull/327 -[#376]: https://github.com/uber-go/zap/pull/376 -[#346]: https://github.com/uber-go/zap/pull/346 -[#365]: https://github.com/uber-go/zap/pull/365 -[#372]: https://github.com/uber-go/zap/pull/372 -[#385]: https://github.com/uber-go/zap/pull/385 -[#396]: https://github.com/uber-go/zap/pull/396 -[#386]: https://github.com/uber-go/zap/pull/386 -[#402]: https://github.com/uber-go/zap/pull/402 -[#415]: https://github.com/uber-go/zap/pull/415 -[#416]: https://github.com/uber-go/zap/pull/416 -[#424]: https://github.com/uber-go/zap/pull/424 -[#425]: https://github.com/uber-go/zap/pull/425 -[#431]: https://github.com/uber-go/zap/pull/431 -[#435]: https://github.com/uber-go/zap/pull/435 -[#444]: https://github.com/uber-go/zap/pull/444 -[#477]: https://github.com/uber-go/zap/pull/477 -[#465]: https://github.com/uber-go/zap/pull/465 
-[#460]: https://github.com/uber-go/zap/pull/460 -[#470]: https://github.com/uber-go/zap/pull/470 -[#487]: https://github.com/uber-go/zap/pull/487 -[#490]: https://github.com/uber-go/zap/pull/490 -[#491]: https://github.com/uber-go/zap/pull/491 -[#504]: https://github.com/uber-go/zap/pull/504 -[#508]: https://github.com/uber-go/zap/pull/508 -[#518]: https://github.com/uber-go/zap/pull/518 -[#577]: https://github.com/uber-go/zap/pull/577 -[#574]: https://github.com/uber-go/zap/pull/574 -[#602]: https://github.com/uber-go/zap/pull/602 -[#572]: https://github.com/uber-go/zap/pull/572 -[#606]: https://github.com/uber-go/zap/pull/606 -[#614]: https://github.com/uber-go/zap/pull/614 -[#657]: https://github.com/uber-go/zap/pull/657 -[#706]: https://github.com/uber-go/zap/pull/706 -[#610]: https://github.com/uber-go/zap/pull/610 -[#675]: https://github.com/uber-go/zap/pull/675 -[#704]: https://github.com/uber-go/zap/pull/704 -[#725]: https://github.com/uber-go/zap/pull/725 -[#736]: https://github.com/uber-go/zap/pull/736 -[#751]: https://github.com/uber-go/zap/pull/751 -[#758]: https://github.com/uber-go/zap/pull/758 -[#771]: https://github.com/uber-go/zap/pull/771 -[#773]: https://github.com/uber-go/zap/pull/773 -[#775]: https://github.com/uber-go/zap/pull/775 -[#786]: https://github.com/uber-go/zap/pull/786 -[#791]: https://github.com/uber-go/zap/pull/791 -[#795]: https://github.com/uber-go/zap/pull/795 -[#799]: https://github.com/uber-go/zap/pull/799 -[#804]: https://github.com/uber-go/zap/pull/804 -[#812]: https://github.com/uber-go/zap/pull/812 -[#806]: https://github.com/uber-go/zap/pull/806 -[#813]: https://github.com/uber-go/zap/pull/813 -[#629]: https://github.com/uber-go/zap/pull/629 -[#697]: https://github.com/uber-go/zap/pull/697 -[#828]: https://github.com/uber-go/zap/pull/828 -[#835]: https://github.com/uber-go/zap/pull/835 -[#843]: https://github.com/uber-go/zap/pull/843 -[#844]: https://github.com/uber-go/zap/pull/844 -[#852]: https://github.com/uber-go/zap/pull/852 -[#854]: https://github.com/uber-go/zap/pull/854 -[#861]: https://github.com/uber-go/zap/pull/861 -[#862]: https://github.com/uber-go/zap/pull/862 -[#865]: https://github.com/uber-go/zap/pull/865 -[#867]: https://github.com/uber-go/zap/pull/867 -[#881]: https://github.com/uber-go/zap/pull/881 -[#903]: https://github.com/uber-go/zap/pull/903 -[#912]: https://github.com/uber-go/zap/pull/912 -[#913]: https://github.com/uber-go/zap/pull/913 -[#928]: https://github.com/uber-go/zap/pull/928 -[#931]: https://github.com/uber-go/zap/pull/931 -[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md index 5cd96568713..ea02f3cae2d 100644 --- a/vendor/go.uber.org/zap/CONTRIBUTING.md +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -16,7 +16,7 @@ you to accept the CLA when you open your pull request. [Fork][fork], then clone the repository: -``` +```bash mkdir -p $GOPATH/src/go.uber.org cd $GOPATH/src/go.uber.org git clone git@github.com:your_github_username/zap.git @@ -27,21 +27,16 @@ git fetch upstream Make sure that the tests and the linters pass: -``` +```bash make test make lint ``` -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. 
- ## Making Changes Start by creating a new branch for your changes: -``` +```bash cd $GOPATH/src/go.uber.org/zap git checkout master git fetch upstream @@ -52,22 +47,22 @@ git checkout -b cool_new_feature Make your changes, then ensure that `make lint` and `make test` still pass. If you're satisfied with your changes, push them to your fork. -``` +```bash git push origin cool_new_feature ``` Then use the GitHub UI to open a pull request. -At this point, you're waiting on us to review your changes. We *try* to respond +At this point, you're waiting on us to review your changes. We _try_ to respond to issues and pull requests within a few business days, and we may suggest some improvements or alternatives. Once your changes are approved, one of the project maintainers will merge them. We're much more likely to approve your changes if you: -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. +- Add tests for new functionality. +- Write a [good commit message][commit-message]. +- Maintain backward compatibility. [fork]: https://github.com/uber-go/zap/fork [open-issue]: https://github.com/uber-go/zap/issues/new diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from vendor/go.uber.org/zap/LICENSE.txt rename to vendor/go.uber.org/zap/LICENSE diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index 9b1bc3b0e1d..eb1cee53bd5 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -1,50 +1,51 @@ -export GOBIN ?= $(shell pwd)/bin +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) -GOLINT = $(GOBIN)/golint -STATICCHECK = $(GOBIN)/staticcheck +export GOBIN ?= $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) + +GOVULNCHECK = $(GOBIN)/govulncheck BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem # Directories containing independent Go modules. -# -# We track coverage only for the main module. -MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test +MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test -# Many Go tools take file globs or directories as arguments instead of packages. -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) +# Directories that we want to track coverage for. +COVER_DIRS = . ./exp .PHONY: all all: lint test .PHONY: lint -lint: $(GOLINT) $(STATICCHECK) - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking lint..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking staticcheck..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e Makefile | tee -a lint.log - @echo "Checking for license headers..." - @./checklicense.sh | tee -a lint.log - @[ ! -s lint.log ] - @echo "Checking 'go mod tidy'..." - @make tidy - @if ! 
git diff --quiet; then \ - echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ - git --no-pager diff; \ - fi - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck +lint: golangci-lint tidy-lint license-lint + +.PHONY: golangci-lint +golangci-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] golangci-lint: $(mod)" && \ + golangci-lint run --path-prefix $(mod)) &&) true + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS), \ + (cd $(dir) && go mod tidy) &&) true + +.PHONY: tidy-lint +tidy-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] tidy: $(mod)" && \ + go mod tidy && \ + git diff --exit-code -- go.mod go.sum) &&) true + + +.PHONY: license-lint +license-lint: + ./checklicense.sh + +$(GOVULNCHECK): + cd tools && go install golang.org/x/vuln/cmd/govulncheck .PHONY: test test: @@ -52,8 +53,10 @@ test: .PHONY: cover cover: - go test -race -coverprofile=cover.out -coverpkg=./... ./... - go tool cover -html=cover.out -o cover.html + @$(foreach dir,$(COVER_DIRS), ( \ + cd $(dir) && \ + go test -race -coverprofile=cover.out -coverpkg=./... ./... \ + && go tool cover -html=cover.out -o cover.html) &&) true .PHONY: bench BENCH ?= . @@ -68,6 +71,6 @@ updatereadme: rm -f README.md cat .readme.tmpl | go run internal/readme/readme.go > README.md -.PHONY: tidy -tidy: - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true +.PHONY: vulncheck +vulncheck: $(GOVULNCHECK) + $(GOVULNCHECK) ./... diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index 9c9dfe1ed7b..a17035cb6f7 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -66,38 +75,44 @@ Log a message and 10 fields: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 2900 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op -| zerolog | 10639 ns/op | +267% | 32 allocs/op -| go-kit | 14434 ns/op | +398% | 59 allocs/op -| logrus | 17104 ns/op | +490% | 81 allocs/op -| apex/log | 32424 ns/op | +1018% | 66 allocs/op -| log15 | 33579 ns/op | +1058% | 76 allocs/op +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 373 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op -| zerolog | 288 ns/op | -23% | 0 allocs/op -| go-kit | 11785 ns/op | +3060% | 58 allocs/op -| logrus | 19629 ns/op | +5162% | 70 allocs/op -| log15 | 21866 ns/op | +5762% | 72 allocs/op -| apex/log | 30890 ns/op | +8182% | 55 allocs/op +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 381 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op -| zerolog | 369 ns/op | -3% | 0 allocs/op -| standard library | 385 ns/op | +1% | 2 allocs/op -| go-kit | 606 ns/op | +59% | 11 allocs/op -| logrus | 1730 ns/op | +354% | 25 allocs/op -| apex/log | 1998 ns/op | +424% | 7 allocs/op -| log15 | 4546 ns/op | +1093% | 22 allocs/op +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -117,7 +132,7 @@ standard.
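As a rough guide to what the three benchmark tables above measure, the scenarios correspond to the following call shapes; a sketch with assumed field names, not part of the patch:

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// "Log a message and 10 fields": every field is encoded at call time.
	logger.Info("processed request",
		zap.String("url", "http://example.com"),
		zap.Int("attempt", 3),
		zap.Duration("backoff", time.Second),
		// ...more fields...
	)

	// "Logger that already has 10 fields of context": fields are
	// encoded once by With and reused on every subsequent call.
	ctxLogger := logger.With(
		zap.String("service", "demo"),
		zap.Int("shard", 7),
	)
	ctxLogger.Info("processed request")

	// "Static string": no fields at all.
	logger.Info("processed request")
}
```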
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go index 5be3704a3e1..abfccb566d5 100644 --- a/vendor/go.uber.org/zap/array.go +++ b/vendor/go.uber.org/zap/array.go @@ -21,6 +21,7 @@ package zap import ( + "fmt" "time" "go.uber.org/zap/zapcore" @@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field { return Array(key, int8s(nums)) } +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. +// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +// +// If instead, you have a slice of pointers of such an object, use the Objects +// field constructor. +// +// var requests []*Request = ... 
+// logger.Info("sending requests", zap.Objects("requests", requests)) +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { + return Array(key, objectValues[T, P](values)) +} + +type objectValues[T any, P ObjectMarshalerPtr[T]] []T + +func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range os { + // It is necessary for us to explicitly reference the "P" type. + // We cannot simply pass "&os[i]" to AppendObject because its type + // is "*T", which the type system does not consider as + // implementing ObjectMarshaler. + // Only the type "P" satisfies ObjectMarshaler, which we have + // to convert "*T" to explicitly. + var p P = &os[i] + if err := arr.AppendObject(p); err != nil { + return err + } + } + return nil +} + // Strings constructs a field that carries a slice of strings. func Strings(key string, ss []string) Field { return Array(key, stringArray(ss)) } +// Stringers constructs a field with the given key, holding a list of the +// output provided by the value's String method +// +// Given an object that implements String on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Request struct{ ... } +// func (a Request) String() string +// +// var requests []Request = ... +// logger.Info("sending requests", zap.Stringers("requests", requests)) +// +// Note that these objects must implement fmt.Stringer directly. +// That is, if you're trying to marshal a []Request, the String method +// must be declared on the Request type, not its pointer (*Request). +func Stringers[T fmt.Stringer](key string, values []T) Field { + return Array(key, stringers[T](values)) +} + +type stringers[T fmt.Stringer] []T + +func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + arr.AppendString(o.String()) + } + return nil +} + // Times constructs a field that carries a slice of time.Times. func Times(key string, ts []time.Time) Field { return Array(key, times(ts)) diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 9e929cd98e6..0b8540c2138 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } +// AppendBytes writes the given slice of bytes to the Buffer. +func (b *Buffer) AppendBytes(v []byte) { + b.bs = append(b.bs, v...) +} + // AppendString writes a string to the Buffer. func (b *Buffer) AppendString(s string) { b.bs = append(b.bs, s...) diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go index 8fb3e202cf4..846323360ee 100644 --- a/vendor/go.uber.org/zap/buffer/pool.go +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -20,25 +20,29 @@ package buffer -import "sync" +import ( + "go.uber.org/zap/internal/pool" +) // A Pool is a type-safe wrapper around a sync.Pool. type Pool struct { - p *sync.Pool + p *pool.Pool[*Buffer] } // NewPool constructs a new Pool. func NewPool() Pool { - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, _size)} - }, - }} + return Pool{ + p: pool.New(func() *Buffer { + return &Buffer{ + bs: make([]byte, 0, _size), + } + }), + } } // Get retrieves a Buffer from the pool, creating one if necessary. 
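A compact, self-contained sketch of the generic constructors added above (`Objects`, `ObjectValues`, `Stringers`); the example types are assumed for illustration and are not part of the patch:

```go
package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Author marshals on the value receiver, so []Author works with zap.Objects.
type Author struct{ Name string }

func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

// Request marshals on the pointer receiver, so []Request works with
// zap.ObjectValues (and []*Request would work with zap.Objects).
type Request struct{ ID int }

func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddInt("id", r.ID)
	return nil
}

// IP implements fmt.Stringer, so []IP works with zap.Stringers.
type IP struct{ A, B, C, D byte }

func (ip IP) String() string {
	return fmt.Sprintf("%d.%d.%d.%d", ip.A, ip.B, ip.C, ip.D)
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("loading article",
		zap.Objects("authors", []Author{{Name: "jane"}, {Name: "sam"}}))
	logger.Info("sending requests",
		zap.ObjectValues("requests", []Request{{ID: 1}, {ID: 2}}))
	logger.Info("peers",
		zap.Stringers("addrs", []IP{{10, 0, 0, 1}, {10, 0, 0, 2}}))
}
```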
func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) + buf := p.p.Get() buf.Reset() buf.pool = p return buf diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index 55637fb0b4b..e76e4e64fbe 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -21,7 +21,7 @@ package zap import ( - "fmt" + "errors" "sort" "time" @@ -95,6 +95,32 @@ type Config struct { // NewProductionEncoderConfig returns an opinionated EncoderConfig for // production environments. +// +// Messages encoded with this configuration will be JSON-formatted +// and will have the following keys by default: +// +// - "level": The logging level (e.g. "info", "error"). +// - "ts": The current time in number of seconds since the Unix epoch. +// - "msg": The message passed to the log statement. +// - "caller": If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - "stacktrace": If available, a stack trace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted as floating-point number of seconds since the Unix +// epoch. +// - Duration is formatted as floating-point number of seconds. +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewProductionEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder func NewProductionEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ TimeKey: "ts", @@ -112,11 +138,22 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig { } } -// NewProductionConfig is a reasonable production logging configuration. -// Logging is enabled at InfoLevel and above. +// NewProductionConfig builds a reasonable default production logging +// configuration. +// Logging is enabled at InfoLevel and above, and uses a JSON encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of ErrorLevel and above. +// DPanicLevel logs will not panic, but will write a stacktrace. +// +// Sampling is enabled at 100:100 by default, +// meaning that after the first 100 log entries +// with the same level and message in the same second, +// it will log every 100th entry +// with the same level and message in the same second. +// You may disable this behavior by setting Sampling to nil. // -// It uses a JSON encoder, writes to standard error, and enables sampling. -// Stacktraces are automatically included on logs of ErrorLevel and above. +// See [NewProductionEncoderConfig] for information +// on the default encoder configuration. func NewProductionConfig() Config { return Config{ Level: NewAtomicLevelAt(InfoLevel), @@ -134,6 +171,32 @@ func NewProductionConfig() Config { // NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for // development environments. +// +// Messages encoded with this configuration will use Zap's console encoder +// intended to print human-readable output. +// It will print log messages with the following information: +// +// - The log level (e.g. "INFO", "ERROR"). +// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - The message passed to the log statement. 
+// - If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - If available, a stacktrace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - Duration is formatted as a string (e.g. "1.234s"). +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewDevelopmentEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ // Keys can be anything except the empty string. @@ -152,12 +215,15 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { } } -// NewDevelopmentConfig is a reasonable development logging configuration. -// Logging is enabled at DebugLevel and above. +// NewDevelopmentConfig builds a reasonable default development logging +// configuration. +// Logging is enabled at DebugLevel and above, and uses a console encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of WarnLevel and above. +// DPanicLevel logs will panic. // -// It enables development mode (which makes DPanicLevel logs panic), uses a -// console encoder, writes to standard error, and disables sampling. -// Stacktraces are automatically included on logs of WarnLevel and above. +// See [NewDevelopmentEncoderConfig] for information +// on the default encoder configuration. func NewDevelopmentConfig() Config { return Config{ Level: NewAtomicLevelAt(DebugLevel), @@ -182,7 +248,7 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) { } if cfg.Level == (AtomicLevel{}) { - return nil, fmt.Errorf("missing Level") + return nil, errors.New("missing Level") } log := New( diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go index 8638dd1b965..3c50d7b4d3f 100644 --- a/vendor/go.uber.org/zap/doc.go +++ b/vendor/go.uber.org/zap/doc.go @@ -32,7 +32,7 @@ // they need to count every allocation and when they'd prefer a more familiar, // loosely typed API. // -// Choosing a Logger +// # Choosing a Logger // // In contexts where performance is nice, but not critical, use the // SugaredLogger. It's 4-10x faster than other structured logging packages and @@ -41,14 +41,15 @@ // variadic number of key-value pairs. (For more advanced use cases, they also // accept strongly typed fields - see the SugaredLogger.With documentation for // details.) -// sugar := zap.NewExample().Sugar() -// defer sugar.Sync() -// sugar.Infow("failed to fetch URL", -// "url", "http://example.com", -// "attempt", 3, -// "backoff", time.Second, -// ) -// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") // // By default, loggers are unbuffered. However, since zap's low-level APIs // allow buffering, calling Sync before letting your process exit is a good @@ -57,32 +58,35 @@ // In the rare contexts where every microsecond and every allocation matter, // use the Logger. 
It's even faster than the SugaredLogger and allocates far // less, but it only supports strongly-typed, structured logging. -// logger := zap.NewExample() -// defer logger.Sync() -// logger.Info("failed to fetch URL", -// zap.String("url", "http://example.com"), -// zap.Int("attempt", 3), -// zap.Duration("backoff", time.Second), -// ) +// +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) // // Choosing between the Logger and SugaredLogger doesn't need to be an // application-wide decision: converting between the two is simple and // inexpensive. -// logger := zap.NewExample() -// defer logger.Sync() -// sugar := logger.Sugar() -// plain := sugar.Desugar() // -// Configuring Zap +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// # Configuring Zap // // The simplest way to build a Logger is to use zap's opinionated presets: // NewExample, NewProduction, and NewDevelopment. These presets build a logger // with a single function call: -// logger, err := zap.NewProduction() -// if err != nil { -// log.Fatalf("can't initialize zap logger: %v", err) -// } -// defer logger.Sync() +// +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() // // Presets are fine for small projects, but larger projects and organizations // naturally require a bit more customization. For most users, zap's Config @@ -94,7 +98,7 @@ // go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration // example for sample code. // -// Extending Zap +// # Extending Zap // // The zap package itself is a relatively thin wrapper around the interfaces // in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., @@ -106,7 +110,7 @@ // Similarly, package authors can use the high-performance Encoder and Core // implementations in the zapcore package to build their own loggers. // -// Frequently Asked Questions +// # Frequently Asked Questions // // An FAQ covering everything from installation errors to design decisions is // available at https://github.com/uber-go/zap/blob/master/FAQ.md. diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go index 08ed8335436..caa04ceefd8 100644 --- a/vendor/go.uber.org/zap/encoder.go +++ b/vendor/go.uber.org/zap/encoder.go @@ -63,7 +63,7 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { - return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") + return nil, errors.New("missing EncodeTime in EncoderConfig") } _encoderMutex.RLock() diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go index 65982a51e54..45f7b838dc1 100644 --- a/vendor/go.uber.org/zap/error.go +++ b/vendor/go.uber.org/zap/error.go @@ -21,14 +21,13 @@ package zap import ( - "sync" - + "go.uber.org/zap/internal/pool" "go.uber.org/zap/zapcore" ) -var _errArrayElemPool = sync.Pool{New: func() interface{} { +var _errArrayElemPool = pool.New(func() *errArrayElem { return &errArrayElem{} -}} +}) // Error is shorthand for the common idiom NamedError("error", err). 
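The expanded "Configuring Zap" guidance above, together with the new `NewProductionConfig` comments on sampling and encoder defaults, translates into code like the following sketch (not part of the patch):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Start from the production preset, then override the pieces the
	// documentation calls out: time encoding and sampling.
	cfg := zap.NewProductionConfig()
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	cfg.Sampling = nil // disable the 100:100 sampling described above

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("configured logger ready")
}
```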
func Error(err error) Field { @@ -60,11 +59,14 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { // potentially an "errorVerbose" attribute, we need to wrap it in a // type that implements LogObjectMarshaler. To prevent this from // allocating, pool the wrapper type. - elem := _errArrayElemPool.Get().(*errArrayElem) + elem := _errArrayElemPool.Get() elem.error = errs[i] - arr.AppendObject(elem) + err := arr.AppendObject(elem) elem.error = nil _errArrayElemPool.Put(elem) + if err != nil { + return err + } } return nil } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index bbb745db5bd..6743930b823 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -25,6 +25,7 @@ import ( "math" "time" + "go.uber.org/zap/internal/stacktrace" "go.uber.org/zap/zapcore" ) @@ -374,7 +375,7 @@ func StackSkip(key string, skip int) Field { // from expanding the zapcore.Field union struct to include a byte slice. Since // taking a stacktrace is already so expensive (~10us), the extra allocation // is okay. - return String(key, takeStacktrace(skip+1)) // skip StackSkip + return String(key, stacktrace.Take(skip+1)) // skip StackSkip } // Duration constructs a field with the given key and value. The encoder @@ -410,6 +411,65 @@ func Inline(val zapcore.ObjectMarshaler) Field { } } +// Dict constructs a field containing the provided key-value pairs. +// It acts similar to [Object], but with the fields specified as arguments. +func Dict(key string, val ...Field) Field { + return dictField(key, val) +} + +// We need a function with the signature (string, T) for zap.Any. +func dictField(key string, val []Field) Field { + return Object(key, dictObject(val)) +} + +type dictObject []Field + +func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, f := range d { + f.AddTo(enc) + } + return nil +} + +// We discovered an issue where zap.Any can cause a performance degradation +// when used in new goroutines. +// +// This happens because the compiler assigns 4.8kb (one zap.Field per arm of +// switch statement) of stack space for zap.Any when it takes the form: +// +// switch v := v.(type) { +// case string: +// return String(key, v) +// case int: +// return Int(key, v) +// // ... +// default: +// return Reflect(key, v) +// } +// +// To avoid this, we use the type switch to assign a value to a single local variable +// and then call a function on it. +// The local variable is just a function reference so it doesn't allocate +// when converted to an interface{}. +// +// A fair bit of experimentation went into this. +// See also: +// +// - https://github.com/uber-go/zap/pull/1301 +// - https://github.com/uber-go/zap/pull/1303 +// - https://github.com/uber-go/zap/pull/1304 +// - https://github.com/uber-go/zap/pull/1305 +// - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. +type anyFieldC[T any] func(string, T) Field + +func (f anyFieldC[T]) Any(key string, val any) Field { + v, _ := val.(T) + // val is guaranteed to be a T, except when it's nil. + return f(key, v) +} + // Any takes a key and an arbitrary value and chooses the best way to represent // them as a field, falling back to a reflection-based approach only if // necessary. @@ -418,132 +478,138 @@ func Inline(val zapcore.ObjectMarshaler) Field { // them. To minimize surprises, []byte values are treated as binary blobs, byte // values are treated as uint8, and runes are always treated as integers. 
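A short sketch of the `Dict` constructor and the reworked `Any` in use (field names assumed for illustration; note that `Any` routes `[]Field` values through the same dict encoding):

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// Dict groups loose fields under one key, like a one-off Object.
	logger.Info("request",
		zap.Dict("auth",
			zap.String("user", "jane"),
			zap.Bool("admin", true),
		),
	)

	// Any picks the best-typed constructor for the value at runtime.
	logger.Info("mixed",
		zap.Any("attempt", 3),
		zap.Any("auth", []zap.Field{zap.String("user", "jane")}),
	)
}
```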
func Any(key string, value interface{}) Field { - switch val := value.(type) { + var c interface{ Any(string, any) Field } + + switch value.(type) { case zapcore.ObjectMarshaler: - return Object(key, val) + c = anyFieldC[zapcore.ObjectMarshaler](Object) case zapcore.ArrayMarshaler: - return Array(key, val) + c = anyFieldC[zapcore.ArrayMarshaler](Array) + case []Field: + c = anyFieldC[[]Field](dictField) case bool: - return Bool(key, val) + c = anyFieldC[bool](Bool) case *bool: - return Boolp(key, val) + c = anyFieldC[*bool](Boolp) case []bool: - return Bools(key, val) + c = anyFieldC[[]bool](Bools) case complex128: - return Complex128(key, val) + c = anyFieldC[complex128](Complex128) case *complex128: - return Complex128p(key, val) + c = anyFieldC[*complex128](Complex128p) case []complex128: - return Complex128s(key, val) + c = anyFieldC[[]complex128](Complex128s) case complex64: - return Complex64(key, val) + c = anyFieldC[complex64](Complex64) case *complex64: - return Complex64p(key, val) + c = anyFieldC[*complex64](Complex64p) case []complex64: - return Complex64s(key, val) + c = anyFieldC[[]complex64](Complex64s) case float64: - return Float64(key, val) + c = anyFieldC[float64](Float64) case *float64: - return Float64p(key, val) + c = anyFieldC[*float64](Float64p) case []float64: - return Float64s(key, val) + c = anyFieldC[[]float64](Float64s) case float32: - return Float32(key, val) + c = anyFieldC[float32](Float32) case *float32: - return Float32p(key, val) + c = anyFieldC[*float32](Float32p) case []float32: - return Float32s(key, val) + c = anyFieldC[[]float32](Float32s) case int: - return Int(key, val) + c = anyFieldC[int](Int) case *int: - return Intp(key, val) + c = anyFieldC[*int](Intp) case []int: - return Ints(key, val) + c = anyFieldC[[]int](Ints) case int64: - return Int64(key, val) + c = anyFieldC[int64](Int64) case *int64: - return Int64p(key, val) + c = anyFieldC[*int64](Int64p) case []int64: - return Int64s(key, val) + c = anyFieldC[[]int64](Int64s) case int32: - return Int32(key, val) + c = anyFieldC[int32](Int32) case *int32: - return Int32p(key, val) + c = anyFieldC[*int32](Int32p) case []int32: - return Int32s(key, val) + c = anyFieldC[[]int32](Int32s) case int16: - return Int16(key, val) + c = anyFieldC[int16](Int16) case *int16: - return Int16p(key, val) + c = anyFieldC[*int16](Int16p) case []int16: - return Int16s(key, val) + c = anyFieldC[[]int16](Int16s) case int8: - return Int8(key, val) + c = anyFieldC[int8](Int8) case *int8: - return Int8p(key, val) + c = anyFieldC[*int8](Int8p) case []int8: - return Int8s(key, val) + c = anyFieldC[[]int8](Int8s) case string: - return String(key, val) + c = anyFieldC[string](String) case *string: - return Stringp(key, val) + c = anyFieldC[*string](Stringp) case []string: - return Strings(key, val) + c = anyFieldC[[]string](Strings) case uint: - return Uint(key, val) + c = anyFieldC[uint](Uint) case *uint: - return Uintp(key, val) + c = anyFieldC[*uint](Uintp) case []uint: - return Uints(key, val) + c = anyFieldC[[]uint](Uints) case uint64: - return Uint64(key, val) + c = anyFieldC[uint64](Uint64) case *uint64: - return Uint64p(key, val) + c = anyFieldC[*uint64](Uint64p) case []uint64: - return Uint64s(key, val) + c = anyFieldC[[]uint64](Uint64s) case uint32: - return Uint32(key, val) + c = anyFieldC[uint32](Uint32) case *uint32: - return Uint32p(key, val) + c = anyFieldC[*uint32](Uint32p) case []uint32: - return Uint32s(key, val) + c = anyFieldC[[]uint32](Uint32s) case uint16: - return Uint16(key, val) + c = 
anyFieldC[uint16](Uint16) case *uint16: - return Uint16p(key, val) + c = anyFieldC[*uint16](Uint16p) case []uint16: - return Uint16s(key, val) + c = anyFieldC[[]uint16](Uint16s) case uint8: - return Uint8(key, val) + c = anyFieldC[uint8](Uint8) case *uint8: - return Uint8p(key, val) + c = anyFieldC[*uint8](Uint8p) case []byte: - return Binary(key, val) + c = anyFieldC[[]byte](Binary) case uintptr: - return Uintptr(key, val) + c = anyFieldC[uintptr](Uintptr) case *uintptr: - return Uintptrp(key, val) + c = anyFieldC[*uintptr](Uintptrp) case []uintptr: - return Uintptrs(key, val) + c = anyFieldC[[]uintptr](Uintptrs) case time.Time: - return Time(key, val) + c = anyFieldC[time.Time](Time) case *time.Time: - return Timep(key, val) + c = anyFieldC[*time.Time](Timep) case []time.Time: - return Times(key, val) + c = anyFieldC[[]time.Time](Times) case time.Duration: - return Duration(key, val) + c = anyFieldC[time.Duration](Duration) case *time.Duration: - return Durationp(key, val) + c = anyFieldC[*time.Duration](Durationp) case []time.Duration: - return Durations(key, val) + c = anyFieldC[[]time.Duration](Durations) case error: - return NamedError(key, val) + c = anyFieldC[error](NamedError) case []error: - return Errors(key, val) + c = anyFieldC[[]error](Errors) case fmt.Stringer: - return Stringer(key, val) + c = anyFieldC[fmt.Stringer](Stringer) default: - return Reflect(key, val) + c = anyFieldC[any](Reflect) } + + return c.Any(key, value) } diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 1297c33b328..2be8f651500 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -22,6 +22,7 @@ package zap import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -32,22 +33,23 @@ import ( // ServeHTTP is a simple JSON endpoint that can report on or change the current // logging level. // -// GET +// # GET // // The GET request returns a JSON description of the current logging level like: -// {"level":"info"} // -// PUT +// {"level":"info"} +// +// # PUT // // The PUT request changes the logging level. It is perfectly safe to change the // logging level while a program is running. Two content types are supported: // -// Content-Type: application/x-www-form-urlencoded +// Content-Type: application/x-www-form-urlencoded // // With this content type, the level can be provided through the request body or // a query parameter. The log level is URL encoded like: // -// level=debug +// level=debug // // The request body takes precedence over the query parameter, if both are // specified. @@ -55,19 +57,25 @@ import ( // This content type is the default for a curl PUT request. Following are two // example curl requests that both set the logging level to debug. 
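For readers skimming the hunk above: zap.Any now binds the matched constructor into a single generic adapter (anyFieldC) instead of returning from each switch arm, and zap.Dict is new public API. A minimal sketch using only functions this patch shows:

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("request",
		// Any still dispatches to the strongly-typed constructors
		// (Int, String, ...), now via one generic closure internally.
		zap.Any("attempts", 3),
		zap.Any("user", "alice"),
		// Dict nests fields as an object without a custom marshaler type.
		zap.Dict("conn", zap.String("host", "localhost"), zap.Int("port", 8080)),
	)
}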
// -// curl -X PUT localhost:8080/log/level?level=debug -// curl -X PUT localhost:8080/log/level -d level=debug +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug // // For any other content type, the payload is expected to be JSON encoded and // look like: // -// {"level":"info"} +// {"level":"info"} // // An example curl request could look like this: // -// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' -// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if err := lvl.serveHTTP(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "internal error: %v", err) + } +} + +func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error { type errorResponse struct { Error string `json:"error"` } @@ -79,19 +87,20 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.Method { case http.MethodGet: - enc.Encode(payload{Level: lvl.Level()}) + return enc.Encode(payload{Level: lvl.Level()}) + case http.MethodPut: requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) if err != nil { w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: err.Error()}) - return + return enc.Encode(errorResponse{Error: err.Error()}) } lvl.SetLevel(requestedLvl) - enc.Encode(payload{Level: lvl.Level()}) + return enc.Encode(payload{Level: lvl.Level()}) + default: w.WriteHeader(http.StatusMethodNotAllowed) - enc.Encode(errorResponse{ + return enc.Encode(errorResponse{ Error: "Only GET and PUT are supported.", }) } @@ -108,7 +117,7 @@ func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error func decodePutURL(r *http.Request) (zapcore.Level, error) { lvl := r.FormValue("level") if lvl == "" { - return 0, fmt.Errorf("must specify logging level") + return 0, errors.New("must specify logging level") } var l zapcore.Level if err := l.UnmarshalText([]byte(lvl)); err != nil { @@ -125,8 +134,7 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) { return 0, fmt.Errorf("malformed request body: %v", err) } if pld.Level == nil { - return 0, fmt.Errorf("must specify logging level") + return 0, errors.New("must specify logging level") } return *pld.Level, nil - } diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go index dfc5b05feb7..f673f9947b8 100644 --- a/vendor/go.uber.org/zap/internal/exit/exit.go +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -24,24 +24,25 @@ package exit import "os" -var real = func() { os.Exit(1) } +var _exit = os.Exit -// Exit normally terminates the process by calling os.Exit(1). If the package -// is stubbed, it instead records a call in the testing spy. -func Exit() { - real() +// With terminates the process by calling os.Exit(code). If the package is +// stubbed, it instead records a call in the testing spy. +func With(code int) { + _exit(code) } // A StubbedExit is a testing fake for os.Exit. type StubbedExit struct { Exited bool - prev func() + Code int + prev func(code int) } // Stub substitutes a fake for the call to os.Exit(1). func Stub() *StubbedExit { - s := &StubbedExit{prev: real} - real = s.exit + s := &StubbedExit{prev: _exit} + _exit = s.exit return s } @@ -56,9 +57,10 @@ func WithStub(f func()) *StubbedExit { // Unstub restores the previous exit function. 
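As a usage note for the handler changes above, AtomicLevel itself satisfies http.Handler, so it can be mounted directly; the route and port below are arbitrary choices for illustration:

package main

import (
	"net/http"

	"go.uber.org/zap"
)

func main() {
	lvl := zap.NewAtomicLevel() // starts at InfoLevel

	// GET reports the level; PUT changes it. Encoding failures now
	// surface as HTTP 500s instead of being silently dropped.
	http.Handle("/log/level", lvl)

	// Try: curl -X PUT localhost:8080/log/level -d level=debug
	_ = http.ListenAndServe("localhost:8080", nil)
}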
func (se *StubbedExit) Unstub() { - real = se.prev + _exit = se.prev } -func (se *StubbedExit) exit() { +func (se *StubbedExit) exit(code int) { se.Exited = true + se.Code = code } diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go new file mode 100644 index 00000000000..40bfed81e6e --- /dev/null +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -0,0 +1,37 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package internal and its subpackages hold types and functionality +// that are not part of Zap's public API. +package internal + +import "go.uber.org/zap/zapcore" + +// LeveledEnabler is an interface satisfied by LevelEnablers that are able to +// report their own level. +// +// This interface is defined to use more conveniently in tests and non-zapcore +// packages. +// This cannot be imported from zapcore because of the cyclic dependency. +type LeveledEnabler interface { + zapcore.LevelEnabler + + Level() zapcore.Level +} diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go new file mode 100644 index 00000000000..60e9d2c432d --- /dev/null +++ b/vendor/go.uber.org/zap/internal/pool/pool.go @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package pool provides internal pool utilities. 
+package pool + +import ( + "sync" +) + +// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed +// object pooling. +// +// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will +// not be detected, so all internal pool use must take care to only store +// pointer types. +type Pool[T any] struct { + pool sync.Pool +} + +// New returns a new [Pool] for T, and will use fn to construct new Ts when +// the pool is empty. +func New[T any](fn func() T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return fn() + }, + }, + } +} + +// Get gets a T from the pool, or creates a new one if the pool is empty. +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) +} + +// Put returns x into the pool. +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) +} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go similarity index 72% rename from vendor/go.uber.org/zap/stacktrace.go rename to vendor/go.uber.org/zap/internal/stacktrace/stack.go index 3d187fa5666..82af7551f93 100644 --- a/vendor/go.uber.org/zap/stacktrace.go +++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,25 +18,26 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package zap +// Package stacktrace provides support for gathering stack traces +// efficiently. +package stacktrace import ( "runtime" - "sync" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) -var _stacktracePool = sync.Pool{ - New: func() interface{} { - return &stacktrace{ - storage: make([]uintptr, 64), - } - }, -} +var _stackPool = pool.New(func() *Stack { + return &Stack{ + storage: make([]uintptr, 64), + } +}) -type stacktrace struct { +// Stack is a captured stack trace. +type Stack struct { pcs []uintptr // program counters; always a subslice of storage frames *runtime.Frames @@ -50,30 +51,30 @@ type stacktrace struct { storage []uintptr } -// stacktraceDepth specifies how deep of a stack trace should be captured. -type stacktraceDepth int +// Depth specifies how deep of a stack trace should be captured. +type Depth int const ( - // stacktraceFirst captures only the first frame. - stacktraceFirst stacktraceDepth = iota + // First captures only the first frame. + First Depth = iota - // stacktraceFull captures the entire call stack, allocating more + // Full captures the entire call stack, allocating more // storage for it if needed. - stacktraceFull + Full ) -// captureStacktrace captures a stack trace of the specified depth, skipping +// Capture captures a stack trace of the specified depth, skipping // the provided number of frames. skip=0 identifies the caller of -// captureStacktrace. +// Capture. // // The caller must call Free on the returned stacktrace after using it. 
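The pool package is internal to zap and not importable, so the following standalone sketch reproduces the same pattern to show what call sites gain — Get returns a T directly, dropping the .(T) assertion that raw sync.Pool requires:

package main

import (
	"fmt"
	"sync"
)

// Pool is a typed wrapper over sync.Pool, mirroring zap's internal pool.Pool.
type Pool[T any] struct{ pool sync.Pool }

func New[T any](fn func() T) *Pool[T] {
	return &Pool[T]{pool: sync.Pool{New: func() any { return fn() }}}
}

func (p *Pool[T]) Get() T  { return p.pool.Get().(T) }
func (p *Pool[T]) Put(x T) { p.pool.Put(x) }

func main() {
	// Store pointer types only, per the SA6002 caveat noted above.
	bufs := New(func() *[]byte { b := make([]byte, 0, 1024); return &b })

	b := bufs.Get()
	*b = append(*b, "hello"...)
	fmt.Println(len(*b)) // 5

	*b = (*b)[:0] // reset before returning to the pool
	bufs.Put(b)
}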
-func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { - stack := _stacktracePool.Get().(*stacktrace) +func Capture(skip int, depth Depth) *Stack { + stack := _stackPool.Get() switch depth { - case stacktraceFirst: + case First: stack.pcs = stack.storage[:1] - case stacktraceFull: + case Full: stack.pcs = stack.storage } @@ -87,7 +88,7 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { // runtime.Callers truncates the recorded stacktrace if there is no // room in the provided slice. For the full stack trace, keep expanding // storage until there are fewer frames than there is room. - if depth == stacktraceFull { + if depth == Full { pcs := stack.pcs for numFrames == len(pcs) { pcs = make([]uintptr, len(pcs)*2) @@ -109,52 +110,56 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { // Free releases resources associated with this stacktrace // and returns it back to the pool. -func (st *stacktrace) Free() { +func (st *Stack) Free() { st.frames = nil st.pcs = nil - _stacktracePool.Put(st) + _stackPool.Put(st) } // Count reports the total number of frames in this stacktrace. // Count DOES NOT change as Next is called. -func (st *stacktrace) Count() int { +func (st *Stack) Count() int { return len(st.pcs) } // Next returns the next frame in the stack trace, // and a boolean indicating whether there are more after it. -func (st *stacktrace) Next() (_ runtime.Frame, more bool) { +func (st *Stack) Next() (_ runtime.Frame, more bool) { return st.frames.Next() } -func takeStacktrace(skip int) string { - stack := captureStacktrace(skip+1, stacktraceFull) +// Take returns a string representation of the current stacktrace. +// +// skip is the number of frames to skip before recording the stack trace. +// skip=0 identifies the caller of Take. +func Take(skip int) string { + stack := Capture(skip+1, Full) defer stack.Free() buffer := bufferpool.Get() defer buffer.Free() - stackfmt := newStackFormatter(buffer) + stackfmt := NewFormatter(buffer) stackfmt.FormatStack(stack) return buffer.String() } -// stackFormatter formats a stack trace into a readable string representation. -type stackFormatter struct { +// Formatter formats a stack trace into a readable string representation. +type Formatter struct { b *buffer.Buffer nonEmpty bool // whether we've written at least one frame already } -// newStackFormatter builds a new stackFormatter. -func newStackFormatter(b *buffer.Buffer) stackFormatter { - return stackFormatter{b: b} +// NewFormatter builds a new Formatter. +func NewFormatter(b *buffer.Buffer) Formatter { + return Formatter{b: b} } // FormatStack formats all remaining frames in the provided stacktrace -- minus // the final runtime.main/runtime.goexit frame. -func (sf *stackFormatter) FormatStack(stack *stacktrace) { +func (sf *Formatter) FormatStack(stack *Stack) { // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a a runtime frame which + // frame, but we ignore this frame. The last frame is a runtime frame which // adds noise, since it's only either runtime.main or runtime.goexit. for frame, more := stack.Next(); more; frame, more = stack.Next() { sf.FormatFrame(frame) @@ -162,7 +167,7 @@ func (sf *stackFormatter) FormatStack(stack *stacktrace) { } // FormatFrame formats the given frame.
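The exported surface of this rename is unchanged: zap.Stack and zap.StackSkip now delegate to stacktrace.Take. A short sketch of the skip semantics (logFailure is an illustrative helper, not zap API):

package main

import "go.uber.org/zap"

func logFailure(logger *zap.Logger, msg string) {
	// skip=1 drops this helper's own frame, so the recorded trace
	// starts at logFailure's caller.
	logger.Error(msg, zap.StackSkip("trace", 1))
}

func main() {
	logger := zap.Must(zap.NewDevelopment())
	defer logger.Sync()
	logFailure(logger, "boom")
}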
-func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { +func (sf *Formatter) FormatFrame(frame runtime.Frame) { if sf.nonEmpty { sf.b.AppendByte('\n') } diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go index 8f86c430f0f..155b208bd3c 100644 --- a/vendor/go.uber.org/zap/level.go +++ b/vendor/go.uber.org/zap/level.go @@ -21,7 +21,9 @@ package zap import ( - "go.uber.org/atomic" + "sync/atomic" + + "go.uber.org/zap/internal" "go.uber.org/zap/zapcore" ) @@ -70,12 +72,14 @@ type AtomicLevel struct { l *atomic.Int32 } +var _ internal.LeveledEnabler = AtomicLevel{} + // NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging // enabled. func NewAtomicLevel() AtomicLevel { - return AtomicLevel{ - l: atomic.NewInt32(int32(InfoLevel)), - } + lvl := AtomicLevel{l: new(atomic.Int32)} + lvl.l.Store(int32(InfoLevel)) + return lvl } // NewAtomicLevelAt is a convenience function that creates an AtomicLevel diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index 087c7422281..c4d30032394 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -22,11 +22,12 @@ package zap import ( "fmt" - "io/ioutil" + "io" "os" "strings" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/stacktrace" "go.uber.org/zap/zapcore" ) @@ -42,7 +43,8 @@ type Logger struct { development bool addCaller bool - onFatal zapcore.CheckWriteAction // default is WriteThenFatal + onPanic zapcore.CheckWriteHook // default is WriteThenPanic + onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string errorOutput zapcore.WriteSyncer @@ -85,7 +87,7 @@ func New(core zapcore.Core, options ...Option) *Logger { func NewNop() *Logger { return &Logger{ core: zapcore.NewNopCore(), - errorOutput: zapcore.AddSync(ioutil.Discard), + errorOutput: zapcore.AddSync(io.Discard), addStack: zapcore.FatalLevel + 1, clock: zapcore.DefaultClock, } @@ -107,6 +109,19 @@ func NewDevelopment(options ...Option) (*Logger, error) { return NewDevelopmentConfig().Build(options...) } +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + // NewExample builds a Logger that's designed for use in zap's testable // examples. It writes DebugLevel and above logs to standard out as JSON, but // omits the timestamp and calling function to keep example output @@ -160,7 +175,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger { } // With creates a child logger and adds structured context to it. Fields added -// to the child don't affect the parent, and vice versa. +// to the child don't affect the parent, and vice versa. Any fields that +// require evaluation (such as Objects) are evaluated upon invocation of With. func (log *Logger) With(fields ...Field) *Logger { if len(fields) == 0 { return log @@ -170,6 +186,35 @@ func (log *Logger) With(fields ...Field) *Logger { return l } +// WithLazy creates a child logger and adds structured context to it lazily. +// +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. 
+// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// WithLazy provides a worthwhile performance optimization for contextual loggers +// when the likelihood of using the child logger is low, +// such as error paths and rarely taken branches. +// +// Similar to [With], fields added to the child don't affect the parent, and vice versa. +func (log *Logger) WithLazy(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewLazyWith(core, fields) + })) +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + // Check returns a CheckedEntry if logging a message at the specified level // is enabled. It's a completely optional optimization; in high-performance // applications, Check can help avoid allocating a slice to hold fields. @@ -177,6 +222,16 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return log.check(lvl, msg) } +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of Log. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + // Debug logs a message at DebugLevel. The message includes any fields passed // at the log site, as well as any fields accumulated on the logger. func (log *Logger) Debug(msg string, fields ...Field) { @@ -253,9 +308,15 @@ func (log *Logger) Core() zapcore.Core { return log.core } +// Name returns the Logger's underlying name, +// or an empty string if the logger is unnamed. +func (log *Logger) Name() string { + return log.name +} + func (log *Logger) clone() *Logger { - copy := *log - return &copy + clone := *log + return &clone } func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { @@ -285,18 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.Should(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // Noop is the default value for CheckWriteAction, and it leads to - // continued execution after a Fatal which is unexpected. - if onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.Should(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.Should(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -317,17 +372,17 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Adding the caller or stack trace requires capturing the callers of // this function. We'll share information between these two.
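Pulling the additions above together — Must, WithLazy, Level, and Log — a brief sketch; the component/attempt fields are illustrative:

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger := zap.Must(zap.NewProduction()) // panics instead of returning an error
	defer logger.Sync()

	// WithLazy defers field evaluation until the child logger first
	// writes, which pays off on rarely taken error paths.
	errLog := logger.WithLazy(zap.String("component", "uploader"))

	// Level reports the minimum enabled level of the underlying core.
	fmt.Println(logger.Level() == zapcore.InfoLevel) // true for NewProduction

	// Log is the level-parameterized twin of Info/Warn/Error.
	errLog.Log(zapcore.ErrorLevel, "upload failed", zap.Int("attempt", 3))
}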
- stackDepth := stacktraceFirst + stackDepth := stacktrace.First if addStack { - stackDepth = stacktraceFull + stackDepth = stacktrace.Full } - stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth) defer stack.Free() if stack.Count() == 0 { if log.addCaller { fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) - log.errorOutput.Sync() + _ = log.errorOutput.Sync() } return ce } @@ -348,7 +403,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { buffer := bufferpool.Get() defer buffer.Free() - stackfmt := newStackFormatter(buffer) + stackfmt := stacktrace.NewFormatter(buffer) // We've already extracted the first frame, so format that // separately and defer to stackfmt for the rest. @@ -361,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. + if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index e9e66161f51..43d357ac902 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -132,10 +132,44 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. +// +// Deprecated: Use [WithFatalHook] instead. func OnFatal(action zapcore.CheckWriteAction) Option { + return WithFatalHook(action) +} + +// WithFatalHook sets a CheckWriteHook to run on fatal logs. +// Zap will call this hook after writing a log statement with a Fatal level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a fatal log message, but it will not exit the +// program. +// +// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit)) +// +// It is important that the provided CheckWriteHook stops the control flow at +// the current statement to meet expectations of callers of the logger. +// We recommend calling os.Exit or runtime.Goexit inside custom hooks at +// minimum. 
+func WithFatalHook(hook zapcore.CheckWriteHook) Option { return optionFunc(func(log *Logger) { - log.onFatal = action + log.onFatal = hook }) } diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index df46fa87a70..499772a00dc 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -26,6 +26,7 @@ import ( "io" "net/url" "os" + "path/filepath" "strings" "sync" @@ -34,23 +35,7 @@ import ( const schemeFile = "file" -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} +var _sinkRegistry = newSinkRegistry() // Sink defines the interface to write to and close logger destinations. type Sink interface { @@ -58,10 +43,6 @@ type Sink interface { io.Closer } -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - type errSinkNotFound struct { scheme string } @@ -70,16 +51,30 @@ func (e *errSinkNotFound) Error() string { return fmt.Sprintf("no sink found for scheme %q", e.scheme) } -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. -func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type sinkRegistry struct { + mu sync.Mutex + factories map[string]func(*url.URL) (Sink, error) // keyed by scheme + openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile +} + +func newSinkRegistry() *sinkRegistry { + sr := &sinkRegistry{ + factories: make(map[string]func(*url.URL) (Sink, error)), + openFile: os.OpenFile, + } + // Infallible operation: the registry is empty, so we can't have a conflict. + _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) + return sr +} + +// RegisterScheme registers the given factory for the specific scheme. 
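One way to exercise these hooks, as the comments suggest: zapcore.WriteThenGoexit stops only the calling goroutine, so Fatal output can be observed without killing the process. A hedged sketch using zap's zaptest/observer package:

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zaptest/observer"
)

func main() {
	core, logs := observer.New(zapcore.InfoLevel)
	logger := zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))

	done := make(chan struct{})
	go func() {
		defer close(done) // deferred calls still run on runtime.Goexit
		logger.Fatal("disk full")
		fmt.Println("unreachable") // Goexit stops before this line
	}()
	<-done

	fmt.Println(logs.All()[0].Message) // disk full
}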
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + sr.mu.Lock() + defer sr.mu.Unlock() if scheme == "" { return errors.New("can't register a sink factory for empty string") @@ -88,14 +83,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { if err != nil { return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) } - if _, ok := _sinkFactories[normalized]; ok { + if _, ok := sr.factories[normalized]; ok { return fmt.Errorf("sink factory already registered for scheme %q", normalized) } - _sinkFactories[normalized] = factory + sr.factories[normalized] = factory return nil } -func newSink(rawURL string) (Sink, error) { +func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) { + // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to + // the drive, and path is unset unless `c:/log.txt` is used. + // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file. + // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows. + if filepath.IsAbs(rawURL) { + return sr.newFileSinkFromPath(rawURL) + } + u, err := url.Parse(rawURL) if err != nil { return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) @@ -104,16 +107,27 @@ func newSink(rawURL string) (Sink, error) { u.Scheme = schemeFile } - _sinkMutex.RLock() - factory, ok := _sinkFactories[u.Scheme] - _sinkMutex.RUnlock() + sr.mu.Lock() + factory, ok := sr.factories[u.Scheme] + sr.mu.Unlock() if !ok { return nil, &errSinkNotFound{u.Scheme} } return factory(u) } -func newFileSink(u *url.URL) (Sink, error) { +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. +func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + return _sinkRegistry.RegisterSink(scheme, factory) +} + +func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) { if u.User != nil { return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) } @@ -130,13 +144,18 @@ func newFileSink(u *url.URL) (Sink, error) { if hn := u.Hostname(); hn != "" && hn != "localhost" { return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) } - switch u.Path { + + return sr.newFileSinkFromPath(u.Path) +} + +func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { + switch path { case "stdout": return nopCloserSink{os.Stdout}, nil case "stderr": return nopCloserSink{os.Stderr}, nil } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) } func normalizeScheme(s string) (string, error) { diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 0b9651981a9..8904cd0871e 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -31,6 +31,7 @@ import ( const ( _oddNumberErrMsg = "Ignored key without a value." _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." ) // A SugaredLogger wraps the base Logger functionality in a slower, but less @@ -38,10 +39,19 @@ const ( // method.
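RegisterSink's public contract is unchanged by the registry refactor. As a usage sketch, the in-memory sink below (memSink and the "mem" scheme are invented for illustration) registers a factory and routes a production config through it:

package main

import (
	"bytes"
	"fmt"
	"net/url"

	"go.uber.org/zap"
)

// memSink collects output in memory; Write is promoted from the embedded Buffer.
type memSink struct{ bytes.Buffer }

func (*memSink) Close() error { return nil }
func (*memSink) Sync() error  { return nil }

func main() {
	sink := &memSink{}
	// The scheme must be valid per RFC 3986 section 3.1 and not yet taken.
	if err := zap.RegisterSink("mem", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"mem://"}
	logger := zap.Must(cfg.Build())

	logger.Info("hello")
	_ = logger.Sync()
	fmt.Println(sink.Len() > 0) // true: the entry landed in the buffer
}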
// // Unlike the Logger, the SugaredLogger doesn't insist on structured logging. -// For each log level, it exposes three methods: one for loosely-typed -// structured logging, one for println-style formatting, and one for -// printf-style formatting. For example, SugaredLoggers can produce InfoLevel -// output with Infow ("info with" structured context), Info, or Infof. +// For each log level, it exposes four methods: +// +// - methods named after the log level for log.Print-style logging +// - methods ending in "w" for loosely-typed structured logging +// - methods ending in "f" for log.Printf-style logging +// - methods ending in "ln" for log.Println-style logging +// +// For example, the methods for InfoLevel are: +// +// Info(...any) Print-style logging +// Infow(...any) Structured logging (read as "info with") +// Infof(string, ...any) Printf-style logging +// Infoln(...any) Println-style logging type SugaredLogger struct { base *Logger } @@ -61,27 +71,40 @@ func (s *SugaredLogger) Named(name string) *SugaredLogger { return &SugaredLogger{base: s.base.Named(name)} } +// WithOptions clones the current SugaredLogger, applies the supplied Options, +// and returns the result. It's safe to use concurrently. +func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger { + base := s.base.clone() + for _, opt := range opts { + opt.apply(base) + } + return &SugaredLogger{base: base} +} + // With adds a variadic number of fields to the logging context. It accepts a // mix of strongly-typed Field objects and loosely-typed key-value pairs. When // processing pairs, the first element of the pair is used as the field key // and the second as the field value. // // For example, -// sugaredLogger.With( -// "hello", "world", -// "failure", errors.New("oh no"), -// Stack(), -// "count", 42, -// "user", User{Name: "alice"}, -// ) +// +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// // is the equivalent of -// unsugared.With( -// String("hello", "world"), -// String("failure", "oh no"), -// Stack(), -// Int("count", 42), -// Object("user", User{Name: "alice"}), -// ) +// +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) // // Note that the keys in key-value pairs should be strings. In development, // passing a non-string key panics. In production, the logger is more @@ -92,83 +115,138 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } -// Debug uses fmt.Sprint to construct and log a message. +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. 
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + +// Debug logs the provided arguments at [DebugLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { s.log(DebugLevel, "", args, nil) } -// Info uses fmt.Sprint to construct and log a message. +// Info logs the provided arguments at [InfoLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Info(args ...interface{}) { s.log(InfoLevel, "", args, nil) } -// Warn uses fmt.Sprint to construct and log a message. +// Warn logs the provided arguments at [WarnLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Warn(args ...interface{}) { s.log(WarnLevel, "", args, nil) } -// Error uses fmt.Sprint to construct and log a message. +// Error logs the provided arguments at [ErrorLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Error(args ...interface{}) { s.log(ErrorLevel, "", args, nil) } -// DPanic uses fmt.Sprint to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanic logs the provided arguments at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) DPanic(args ...interface{}) { s.log(DPanicLevel, "", args, nil) } -// Panic uses fmt.Sprint to construct and log a message, then panics. +// Panic constructs a message with the provided arguments and panics. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Panic(args ...interface{}) { s.log(PanicLevel, "", args, nil) } -// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +// Fatal constructs a message with the provided arguments and calls os.Exit. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } -// Debugf uses fmt.Sprintf to log a templated message. +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + +// Debugf formats the message according to the format specifier +// and logs it at [DebugLevel]. func (s *SugaredLogger) Debugf(template string, args ...interface{}) { s.log(DebugLevel, template, args, nil) } -// Infof uses fmt.Sprintf to log a templated message. +// Infof formats the message according to the format specifier +// and logs it at [InfoLevel]. func (s *SugaredLogger) Infof(template string, args ...interface{}) { s.log(InfoLevel, template, args, nil) } -// Warnf uses fmt.Sprintf to log a templated message. +// Warnf formats the message according to the format specifier +// and logs it at [WarnLevel]. 
func (s *SugaredLogger) Warnf(template string, args ...interface{}) { s.log(WarnLevel, template, args, nil) } -// Errorf uses fmt.Sprintf to log a templated message. +// Errorf formats the message according to the format specifier +// and logs it at [ErrorLevel]. func (s *SugaredLogger) Errorf(template string, args ...interface{}) { s.log(ErrorLevel, template, args, nil) } -// DPanicf uses fmt.Sprintf to log a templated message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanicf formats the message according to the format specifier +// and logs it at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { s.log(DPanicLevel, template, args, nil) } -// Panicf uses fmt.Sprintf to log a templated message, then panics. +// Panicf formats the message according to the format specifier +// and panics. func (s *SugaredLogger) Panicf(template string, args ...interface{}) { s.log(PanicLevel, template, args, nil) } -// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +// Fatalf formats the message according to the format specifier +// and calls os.Exit. func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // // When debug-level logging is disabled, this is much faster than -// s.With(keysAndValues).Debug(msg) +// +// s.With(keysAndValues).Debug(msg) func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { s.log(DebugLevel, msg, nil, keysAndValues) } @@ -210,11 +288,61 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + +// Debugln logs a message at [DebugLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Debugln(args ...interface{}) { + s.logln(DebugLevel, args, nil) +} + +// Infoln logs a message at [InfoLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Infoln(args ...interface{}) { + s.logln(InfoLevel, args, nil) +} + +// Warnln logs a message at [WarnLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Warnln(args ...interface{}) { + s.logln(WarnLevel, args, nil) +} + +// Errorln logs a message at [ErrorLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Errorln(args ...interface{}) { + s.logln(ErrorLevel, args, nil) +} + +// DPanicln logs a message at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are always added between arguments. +func (s *SugaredLogger) DPanicln(args ...interface{}) { + s.logln(DPanicLevel, args, nil) +} + +// Panicln logs a message at [PanicLevel] and panics. +// Spaces are always added between arguments. 
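A sketch of the widened SugaredLogger surface — the ...ln methods and level-parameterized Logf documented above, plus the bare-error handling that sweetenFields gains later in this file:

package main

import (
	"errors"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	sugar := zap.Must(zap.NewDevelopment()).Sugar()
	defer sugar.Sync()

	sugar.Infoln("listening on", 8080)                 // Sprintln: spaces always added
	sugar.Logf(zapcore.WarnLevel, "retry %d/%d", 2, 5) // Printf at a chosen level

	// A lone error among the key-value pairs becomes the "error" field;
	// a second keyless error would be reported via _multipleErrMsg.
	sugar.Errorw("fetch failed", errors.New("timeout"), "attempt", 3)
}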
+func (s *SugaredLogger) Panicln(args ...interface{}) { + s.logln(PanicLevel, args, nil) +} + +// Fatalln logs a message at [FatalLevel] and calls os.Exit. +// Spaces are always added between arguments. +func (s *SugaredLogger) Fatalln(args ...interface{}) { + s.logln(FatalLevel, args, nil) +} + // Sync flushes any buffered log entries. func (s *SugaredLogger) Sync() error { return s.base.Sync() } +// log message with Sprint, Sprintf, or neither. func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { // If logging at this level is completely disabled, skip the overhead of // string formatting. @@ -228,6 +356,18 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf } } +// logln message with Sprintln +func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) { + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessageln(fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + // getMessage format with Sprint, Sprintf, or neither. func getMessage(template string, fmtArgs []interface{}) string { if len(fmtArgs) == 0 { @@ -246,15 +386,24 @@ func getMessage(template string, fmtArgs []interface{}) string { return fmt.Sprint(fmtArgs...) } +// getMessageln format with Sprintln. +func getMessageln(fmtArgs []interface{}) string { + msg := fmt.Sprintln(fmtArgs...) + return msg[:len(msg)-1] +} + func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { if len(args) == 0 { return nil } - // Allocate enough space for the worst case; if users pass only structured - // fields, we shouldn't penalize them with extra allocations. - fields := make([]Field, 0, len(args)) - var invalid invalidPairs + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) for i := 0; i < len(args); { // This is a strongly-typed field. Consume it and move on. @@ -264,6 +413,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { continue } + // If it is an error, consume it and move on. + if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + // Make sure this element isn't a dangling key. if i == len(args)-1 { s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go index 86a709ab0be..06768c67919 100644 --- a/vendor/go.uber.org/zap/writer.go +++ b/vendor/go.uber.org/zap/writer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,7 +23,6 @@ package zap import ( "fmt" "io" - "io/ioutil" "go.uber.org/zap/zapcore" @@ -49,40 +48,40 @@ import ( // os.Stdout and os.Stderr. When specified without a scheme, relative file // paths also work. func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { - writers, close, err := open(paths) + writers, closeAll, err := open(paths) if err != nil { return nil, nil, err } writer := CombineWriteSyncers(writers...) 
- return writer, close, nil + return writer, closeAll, nil } func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { writers := make([]zapcore.WriteSyncer, 0, len(paths)) closers := make([]io.Closer, 0, len(paths)) - close := func() { + closeAll := func() { for _, c := range closers { - c.Close() + _ = c.Close() } } var openErr error for _, path := range paths { - sink, err := newSink(path) + sink, err := _sinkRegistry.newSink(path) if err != nil { - openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) + openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) continue } writers = append(writers, sink) closers = append(closers, sink) } if openErr != nil { - close() - return writers, nil, openErr + closeAll() + return nil, nil, openErr } - return writers, close, nil + return writers, closeAll, nil } // CombineWriteSyncers is a utility that combines multiple WriteSyncers into a @@ -93,7 +92,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { // using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { if len(writers) == 0 { - return zapcore.AddSync(ioutil.Discard) + return zapcore.AddSync(io.Discard) } return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) } diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go index ef2f7d9637b..a40e93b3ec8 100644 --- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -43,6 +43,37 @@ const ( // // BufferedWriteSyncer is safe for concurrent use. You don't need to use // zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +// +// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log +// destination (*os.File is a valid WriteSyncer), wrap it with +// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the +// object. +// +// func main() { +// ws := ... // your log destination +// bws := &zapcore.BufferedWriteSyncer{WS: ws} +// defer bws.Stop() +// +// // ... +// core := zapcore.NewCore(enc, bws, lvl) +// logger := zap.New(core) +// +// // ... +// } +// +// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs, +// waiting at most 30 seconds between flushes. +// You can customize these parameters by setting the Size or FlushInterval +// fields. +// For example, the following buffers up to 512 kB of logs before flushing them +// to Stderr, with a maximum of one minute between each flush. +// +// ws := &BufferedWriteSyncer{ +// WS: os.Stderr, +// Size: 512 * 1024, // 512 kB +// FlushInterval: time.Minute, +// } +// defer ws.Stop() type BufferedWriteSyncer struct { // WS is the WriteSyncer around which BufferedWriteSyncer will buffer // writes. 
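The new doc comment in runnable form — buffering stderr behind a BufferedWriteSyncer with the same 512 kB / one-minute overrides it describes:

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	ws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(os.Stderr),
		Size:          512 * 1024,  // flush once 512 kB is buffered...
		FlushInterval: time.Minute, // ...or at most one minute apart
	}
	defer ws.Stop() // flushes buffered output and stops the background flusher

	enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
	logger := zap.New(zapcore.NewCore(enc, ws, zapcore.InfoLevel))
	logger.Info("buffered hello")
}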
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 1aa5dc36467..cc2b4e07b93 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -22,20 +22,20 @@ package zapcore import ( "fmt" - "sync" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) -var _sliceEncoderPool = sync.Pool{ - New: func() interface{} { - return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} - }, -} +var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder { + return &sliceArrayEncoder{ + elems: make([]interface{}, 0, 2), + } +}) func getSliceEncoder() *sliceArrayEncoder { - return _sliceEncoderPool.Get().(*sliceArrayEncoder) + return _sliceEncoderPool.Get() } func putSliceEncoder(e *sliceArrayEncoder) { @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go index a1ef8b034bb..776e93f6f35 100644 --- a/vendor/go.uber.org/zap/zapcore/core.go +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -69,6 +69,15 @@ type ioCore struct { out WriteSyncer } +var ( + _ Core = (*ioCore)(nil) + _ leveledEnabler = (*ioCore)(nil) +) + +func (c *ioCore) Level() Level { + return LevelOf(c.LevelEnabler) +} + func (c *ioCore) With(fields []Field) Core { clone := c.clone() addFields(clone.enc, fields) @@ -93,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error { return err } if ent.Level > ErrorLevel { - // Since we may be crashing the program, sync the output. Ignore Sync - // errors, pending a clean solution to issue #370. - c.Sync() + // Since we may be crashing the program, sync the output. + // Ignore Sync errors, pending a clean solution to issue #370. + _ = c.Sync() } return nil } diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 6e5fd565113..04462541565 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -188,10 +194,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error { // UnmarshalYAML unmarshals YAML to a TimeEncoder. // If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. 
-// timeEncoder: -// layout: 06/01/02 03:04pm +// +// timeEncoder: +// layout: 06/01/02 03:04pm +// // If value is string, it uses UnmarshalText. -// timeEncoder: iso8601 +// +// timeEncoder: iso8601 func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { var o struct { Layout string `json:"layout" yaml:"layout"` @@ -216,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -259,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -289,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 0885505b75b..459a5d7ce3c 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -24,26 +24,23 @@ import ( "fmt" "runtime" "strings" - "sync" "time" + "go.uber.org/multierr" "go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/exit" - - "go.uber.org/multierr" + "go.uber.org/zap/internal/pool" ) -var ( - _cePool = sync.Pool{New: func() interface{} { - // Pre-allocate some space for cores. - return &CheckedEntry{ - cores: make([]Core, 4), - } - }} -) +var _cePool = pool.New(func() *CheckedEntry { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } +}) func getCheckedEntry() *CheckedEntry { - ce := _cePool.Get().(*CheckedEntry) + ce := _cePool.Get() ce.reset() return ce } @@ -152,6 +149,27 @@ type Entry struct { Stack string } +// CheckWriteHook is a custom action that may be executed after an entry is +// written. +// +// Register one on a CheckedEntry with the After method. +// +// if ce := logger.Check(...); ce != nil { +// ce = ce.After(hook) +// ce.Write(...) +// } +// +// You can configure the hook for Fatal log statements at the logger level with +// the zap.WithFatalHook option. +type CheckWriteHook interface { + // OnWrite is invoked with the CheckedEntry that was written and a list + // of fields added with that entry. + // + // The list of fields DOES NOT include fields that were already added + // to the logger with the With method. + OnWrite(*CheckedEntry, []Field) +} + // CheckWriteAction indicates what action to take after a log entry is // processed. Actions are ordered in increasing severity. type CheckWriteAction uint8 @@ -164,21 +182,36 @@ const ( WriteThenGoexit // WriteThenPanic causes a panic after Write. WriteThenPanic - // WriteThenFatal causes a fatal os.Exit after Write. + // WriteThenFatal causes an os.Exit(1) after Write. 
WriteThenFatal ) +// OnWrite implements the OnWrite method to keep CheckWriteAction compatible +// with the new CheckWriteHook interface which deprecates CheckWriteAction. +func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) { + switch a { + case WriteThenGoexit: + runtime.Goexit() + case WriteThenPanic: + panic(ce.Message) + case WriteThenFatal: + exit.With(1) + } +} + +var _ CheckWriteHook = CheckWriteAction(0) + // CheckedEntry is an Entry together with a collection of Cores that have // already agreed to log it. // -// CheckedEntry references should be created by calling AddCore or Should on a +// CheckedEntry references should be created by calling AddCore or After on a // nil *CheckedEntry. References are returned to a pool after Write, and MUST // NOT be retained after calling their Write method. type CheckedEntry struct { Entry ErrorOutput WriteSyncer dirty bool // best-effort detection of pool misuse - should CheckWriteAction + after CheckWriteHook cores []Core } @@ -186,7 +219,7 @@ func (ce *CheckedEntry) reset() { ce.Entry = Entry{} ce.ErrorOutput = nil ce.dirty = false - ce.should = WriteThenNoop + ce.after = nil for i := range ce.cores { // don't keep references to cores ce.cores[i] = nil @@ -209,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) - ce.ErrorOutput.Sync() + _ = ce.ErrorOutput.Sync() // ignore error } return } @@ -221,20 +254,14 @@ func (ce *CheckedEntry) Write(fields ...Field) { } if err != nil && ce.ErrorOutput != nil { fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) - ce.ErrorOutput.Sync() + _ = ce.ErrorOutput.Sync() // ignore error } - should, msg := ce.should, ce.Message - putCheckedEntry(ce) - - switch should { - case WriteThenPanic: - panic(msg) - case WriteThenFatal: - exit.Exit() - case WriteThenGoexit: - runtime.Goexit() + hook := ce.after + if hook != nil { + hook.OnWrite(ce, fields) } + putCheckedEntry(ce) } // AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be @@ -252,11 +279,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { // Should sets this CheckedEntry's CheckWriteAction, which controls whether a // Core will panic or fatal after writing this log entry. Like AddCore, it's // safe to call on nil CheckedEntry references. +// +// Deprecated: Use [CheckedEntry.After] instead. func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + return ce.After(ent, should) +} + +// After sets this CheckEntry's CheckWriteHook, which will be called after this +// log entry has been written. It's safe to call this on nil CheckedEntry +// references. +func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry { if ce == nil { ce = getCheckedEntry() ce.Entry = ent } - ce.should = should + ce.after = hook return ce } diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index 74919b0ccb1..c40df13269a 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -23,7 +23,8 @@ package zapcore import ( "fmt" "reflect" - "sync" + + "go.uber.org/zap/internal/pool" ) // Encodes the given error into fields of an object. 
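
// For illustration: a hedged sketch of the CheckWriteHook/After API
// introduced above. zap.WithFatalHook installs a hook for Fatal entries;
// the flushHook type and the core/sink parameters are assumptions made for
// this example, not part of the patch.
package example

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// flushHook syncs a sink before terminating, instead of exiting abruptly.
type flushHook struct{ sink zapcore.WriteSyncer }

// OnWrite implements zapcore.CheckWriteHook.
func (h flushHook) OnWrite(_ *zapcore.CheckedEntry, _ []zapcore.Field) {
	_ = h.sink.Sync() // best-effort flush; ignore the error, as above
	os.Exit(1)
}

func newLogger(core zapcore.Core, sink zapcore.WriteSyncer) *zap.Logger {
	return zap.New(core, zap.WithFatalHook(flushHook{sink: sink}))
}
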
A field with the given @@ -36,13 +37,13 @@ import ( // causer (from github.com/pkg/errors), a ${key}Causes field is added with an // array of objects containing the errors this error was comprised of. // -// { -// "error": err.Error(), -// "errorVerbose": fmt.Sprintf("%+v", err), -// "errorCauses": [ -// ... -// ], -// } +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... +// ], +// } func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { // Try to capture panics (from nil references or otherwise) when calling // the Error() method @@ -97,15 +98,18 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { } el := newErrArrayElem(errs[i]) - arr.AppendObject(el) + err := arr.AppendObject(el) el.Free() + if err != nil { + return err + } } return nil } -var _errArrayElemPool = sync.Pool{New: func() interface{} { +var _errArrayElemPool = pool.New(func() *errArrayElem { return &errArrayElem{} -}} +}) // Encodes any error into a {"error": ...} re-using the same errors logic. // @@ -113,7 +117,7 @@ var _errArrayElemPool = sync.Pool{New: func() interface{} { type errArrayElem struct{ err error } func newErrArrayElem(err error) *errArrayElem { - e := _errArrayElemPool.Get().(*errArrayElem) + e := _errArrayElemPool.Get() e.err = err return e } diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a126f..308c9781ed1 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go index 5db4afb302b..198def9917c 100644 --- a/vendor/go.uber.org/zap/zapcore/hook.go +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -27,6 +27,11 @@ type hooked struct { funcs []func(Entry) error } +var ( + _ Core = (*hooked)(nil) + _ leveledEnabler = (*hooked)(nil) +) + // RegisterHooks wraps a Core and runs a collection of user-defined callback // hooks each time a message is logged. Execution of the callbacks is blocking. // @@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core { } } +func (h *hooked) Level() Level { + return LevelOf(h.Core) +} + func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { // Let the wrapped Core decide whether to log this message or not. This // also gives the downstream a chance to register itself directly with the diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go index 5a1749261ab..7a11237ae97 100644 --- a/vendor/go.uber.org/zap/zapcore/increase_level.go +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -27,6 +27,11 @@ type levelFilterCore struct { level LevelEnabler } +var ( + _ Core = (*levelFilterCore)(nil) + _ leveledEnabler = (*levelFilterCore)(nil) +) + // NewIncreaseLevelCore creates a core that can be used to increase the level of // an existing Core. It cannot be used to decrease the logging level, as it acts // as a filter before calling the underlying core. 
If level decreases the log level, @@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool { return c.level.Enabled(lvl) } +func (c *levelFilterCore) Level() Level { + return LevelOf(c.level) +} + func (c *levelFilterCore) With(fields []Field) Core { return &levelFilterCore{c.core.With(fields), c.level} } diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index c5d751b8215..9685169b2ea 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -23,24 +23,20 @@ package zapcore import ( "encoding/base64" "math" - "sync" "time" "unicode/utf8" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) // For JSON-escaping; see jsonEncoder.safeAddString below. const _hex = "0123456789abcdef" -var _jsonPool = sync.Pool{New: func() interface{} { +var _jsonPool = pool.New(func() *jsonEncoder { return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} +}) func putJSONEncoder(enc *jsonEncoder) { if enc.reflectBuf != nil { @@ -71,7 +67,9 @@ type jsonEncoder struct { // // Note that the encoder doesn't deduplicate keys, so it's possible to produce // a message like -// {"foo":"bar","foo":"baz"} +// +// {"foo":"bar","foo":"baz"} +// // This is permitted by the JSON specification, but not encouraged. Many // libraries will ignore duplicate key-value pairs (typically keeping the last // pair) when unmarshaling, but users should attempt to avoid adding duplicate @@ -352,7 +350,7 @@ func (enc *jsonEncoder) Clone() Encoder { } func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() + clone := _jsonPool.Get() clone.EncoderConfig = enc.EncoderConfig clone.spaced = enc.spaced clone.openNamespaces = enc.openNamespaces @@ -374,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { @@ -488,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { // Unlike the standard library's encoder, it doesn't attempt to protect the // user from browser vulnerabilities or JSONP-related problems. func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } + safeAppendStringLike( + (*buffer.Buffer).AppendString, + utf8.DecodeRuneInString, + enc.buf, + s, + ) } // safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. func (enc *jsonEncoder) safeAddByteString(s []byte) { + safeAppendStringLike( + (*buffer.Buffer).AppendBytes, + utf8.DecodeRune, + enc.buf, + s, + ) +} + +// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString. +// It appends a string or byte slice to the buffer, escaping all special characters. +func safeAppendStringLike[S []byte | string]( + // appendTo appends this string-like object to the buffer. + appendTo func(*buffer.Buffer, S), + // decodeRune decodes the next rune from the string-like object + // and returns its value and width in bytes. 
+ decodeRune func(S) (rune, int), + buf *buffer.Buffer, + s S, +) { + // The encoding logic below works by skipping over characters + // that can be safely copied as-is, + // until a character is found that needs special handling. + // At that point, we copy everything we've seen so far, + // and then handle that special character. + // + // last is the index of the last byte that was copied to the buffer. + last := 0 for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { + if s[i] >= utf8.RuneSelf { + // Character >= RuneSelf may be part of a multi-byte rune. + // They need to be decoded before we can decide how to handle them. + r, size := decodeRune(s[i:]) + if r != utf8.RuneError || size != 1 { + // No special handling required. + // Skip over this rune and continue. + i += size + continue + } + + // Invalid UTF-8 sequence. + // Replace it with the Unicode replacement character. + appendTo(buf, s[last:i]) + buf.AppendString(`\ufffd`) + i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { + last = i + } else { + // Character < RuneSelf is a single-byte UTF-8 rune. + if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' { + // No escaping necessary. + // Skip over this character and continue. + i++ + continue + } + + // This character needs to be escaped. + appendTo(buf, s[last:i]) + switch s[i] { + case '\\', '"': + buf.AppendByte('\\') + buf.AppendByte(s[i]) + case '\n': + buf.AppendByte('\\') + buf.AppendByte('n') + case '\r': + buf.AppendByte('\\') + buf.AppendByte('r') + case '\t': + buf.AppendByte('\\') + buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + buf.AppendString(`\u00`) + buf.AppendByte(_hex[s[i]>>4]) + buf.AppendByte(_hex[s[i]&0xF]) + } + i++ - continue + last = i } - enc.buf.Write(s[i : i+size]) - i += size } -} -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. -func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false - } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. - enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) - } - return true -} - -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false + // add remaining + appendTo(buf, s[last:]) } diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go new file mode 100644 index 00000000000..05288d6a88e --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "sync"
+
+type lazyWithCore struct {
+	Core
+	sync.Once
+	fields []Field
+}
+
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a non-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+	return &lazyWithCore{
+		Core:   core,
+		fields: fields,
+	}
+}
+
+func (d *lazyWithCore) initOnce() {
+	d.Once.Do(func() {
+		d.Core = d.Core.With(d.fields)
+	})
+}
+
+func (d *lazyWithCore) With(fields []Field) Core {
+	d.initOnce()
+	return d.Core.With(fields)
+}
+
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+	d.initOnce()
+	return d.Core.Check(e, ce)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
index 56e88dc0c84..e01a2413166 100644
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -53,6 +53,11 @@ const (
 
 	_minLevel = DebugLevel
 	_maxLevel = FatalLevel
+
+	// InvalidLevel is an invalid value for Level.
+	//
+	// Core implementations may panic if they see messages of this level.
+	InvalidLevel = _maxLevel + 1
 )
 
 // ParseLevel parses a level based on the lower-case or all-caps ASCII
@@ -67,6 +72,43 @@ func ParseLevel(text string) (Level, error) {
 	return level, err
 }
 
+type leveledEnabler interface {
+	LevelEnabler
+
+	Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+//	func (c *core) Level() Level {
+//		return c.currentLevel
+//	}
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+//	func (c *coreWrapper) Level() Level {
+//		return zapcore.LevelOf(c.wrappedCore)
+//	}
+func LevelOf(enab LevelEnabler) Level {
+	if lvler, ok := enab.(leveledEnabler); ok {
+		return lvler.Level()
+	}
+
+	for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+		if enab.Enabled(lvl) {
+			return lvl
+		}
+	}
+
+	return InvalidLevel
+}
+
 // String returns a lower-case ASCII representation of the log level.
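
// For illustration: how a wrapping Core can cooperate with the LevelOf
// helper added above, following its own doc comment. The coreWrapper name
// is an assumption made for this sketch, not part of the diff.
package example

import "go.uber.org/zap/zapcore"

type coreWrapper struct{ zapcore.Core }

// Level reports the wrapped core's minimum enabled level, so that
// zapcore.LevelOf(coreWrapper{...}) need not probe Enabled for every level.
func (c coreWrapper) Level() zapcore.Level {
	return zapcore.LevelOf(c.Core)
}
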
func (l Level) String() string { switch l { diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index 8c116049d35..b7c093a4f2d 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,9 +21,8 @@ package zapcore import ( + "sync/atomic" "time" - - "go.uber.org/atomic" ) const ( @@ -66,16 +65,16 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { tn := t.UnixNano() resetAfter := c.resetAt.Load() if resetAfter > tn { - return c.counter.Inc() + return c.counter.Add(1) } c.counter.Store(1) newResetAfter := tn + tick.Nanoseconds() - if !c.resetAt.CAS(resetAfter, newResetAfter) { + if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) { // We raced with another goroutine trying to reset, and it also reset // the counter to 1, so we need to reincrement the counter. - return c.counter.Inc() + return c.counter.Add(1) } return 1 @@ -113,12 +112,12 @@ func nopSamplingHook(Entry, SamplingDecision) {} // This hook may be used to get visibility into the performance of the sampler. // For example, use it to track metrics of dropped versus sampled logs. // -// var dropped atomic.Int64 -// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { -// if dec&zapcore.LogDropped > 0 { -// dropped.Inc() -// } -// }) +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { return optionFunc(func(s *sampler) { s.hook = hook @@ -135,7 +134,7 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { // // For example, // -// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// core = NewSamplerWithOptions(core, time.Second, 10, 5) // // This will log the first 10 log entries with the same level and message // in a one second interval as-is. Following that, it will allow through @@ -175,6 +174,11 @@ type sampler struct { hook func(Entry, SamplingDecision) } +var ( + _ Core = (*sampler)(nil) + _ leveledEnabler = (*sampler)(nil) +) + // NewSampler creates a Core that samples incoming entries, which // caps the CPU and I/O load of logging while attempting to preserve a // representative subset of your logs. @@ -192,6 +196,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { return NewSamplerWithOptions(core, tick, first, thereafter) } +func (s *sampler) Level() Level { + return LevelOf(s.Core) +} + func (s *sampler) With(fields []Field) Core { return &sampler{ Core: s.Core.With(fields), diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go index 07a32eef9a4..9bb32f05576 100644 --- a/vendor/go.uber.org/zap/zapcore/tee.go +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. 
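
// For illustration: a hedged sketch combining the two sampler doc snippets
// above, wrapping an existing core in a sampler whose hook counts dropped
// entries. The sampled helper is an assumption made for this example.
package example

import (
	"sync/atomic"
	"time"

	"go.uber.org/zap/zapcore"
)

func sampled(core zapcore.Core) (zapcore.Core, *atomic.Int64) {
	dropped := new(atomic.Int64)
	s := zapcore.NewSamplerWithOptions(core, time.Second, 10, 5,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Add(1) // count entries the sampler dropped
			}
		}))
	return s, dropped
}
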
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -24,6 +24,11 @@ import "go.uber.org/multierr" type multiCore []Core +var ( + _ leveledEnabler = multiCore(nil) + _ Core = multiCore(nil) +) + // NewTee creates a Core that duplicates log entries into two or more // underlying Cores. // @@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core { return clone } +func (mc multiCore) Level() Level { + minLvl := _maxLevel // mc is never empty + for i := range mc { + if lvl := LevelOf(mc[i]); lvl < minLvl { + minLvl = lvl + } + } + return minLvl +} + func (mc multiCore) Enabled(lvl Level) bool { for i := range mc { if mc[i].Enabled(lvl) { diff --git a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go index 356e12741e0..682de254de2 100644 --- a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go +++ b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go @@ -30,22 +30,20 @@ import ( // See https://github.com/grpc/grpc-go/blob/v1.35.0/grpclog/loggerv2.go#L77-L86 const ( - grpcLvlInfo = 0 - grpcLvlWarn = 1 - grpcLvlError = 2 - grpcLvlFatal = 3 + grpcLvlInfo int = iota + grpcLvlWarn + grpcLvlError + grpcLvlFatal ) -var ( - // _grpcToZapLevel maps gRPC log levels to zap log levels. - // See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level - _grpcToZapLevel = map[int]zapcore.Level{ - grpcLvlInfo: zapcore.InfoLevel, - grpcLvlWarn: zapcore.WarnLevel, - grpcLvlError: zapcore.ErrorLevel, - grpcLvlFatal: zapcore.FatalLevel, - } -) +// _grpcToZapLevel maps gRPC log levels to zap log levels. +// See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level +var _grpcToZapLevel = map[int]zapcore.Level{ + grpcLvlInfo: zapcore.InfoLevel, + grpcLvlWarn: zapcore.WarnLevel, + grpcLvlError: zapcore.ErrorLevel, + grpcLvlFatal: zapcore.FatalLevel, +} // An Option overrides a Logger's default configuration. type Option interface { @@ -61,6 +59,7 @@ func (f optionFunc) apply(log *Logger) { // WithDebug configures a Logger to print at zap's DebugLevel instead of // InfoLevel. // It only affects the Printf, Println and Print methods, which are only used in the gRPC v1 grpclog.Logger API. +// // Deprecated: use grpclog.SetLoggerV2() for v2 API. func WithDebug() Option { return optionFunc(func(logger *Logger) { @@ -146,19 +145,22 @@ type Logger struct { } // Print implements grpclog.Logger. -// Deprecated: use Info(). +// +// Deprecated: use [Logger.Info]. func (l *Logger) Print(args ...interface{}) { l.print.Print(args...) } // Printf implements grpclog.Logger. -// Deprecated: use Infof(). +// +// Deprecated: use [Logger.Infof]. func (l *Logger) Printf(format string, args ...interface{}) { l.print.Printf(format, args...) } // Println implements grpclog.Logger. -// Deprecated: use Info(). +// +// Deprecated: use [Logger.Info]. func (l *Logger) Println(args ...interface{}) { l.print.Println(args...) } diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go index 96f4a1a56ec..212538cb5a8 100644 --- a/vendor/golang.org/x/crypto/pkcs12/crypto.go +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -26,7 +26,7 @@ type pbeCipher interface { create(key []byte) (cipher.Block, error) // deriveKey returns a key derived from the given password and salt. deriveKey(salt, password []byte, iterations int) []byte - // deriveKey returns an IV derived from the given password and salt. + // deriveIV returns an IV derived from the given password and salt. 
deriveIV(salt, password []byte, iterations int) []byte } diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index de58dfb8dc4..ca645d9a1af 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go index e3784123c81..5b516c55fff 100644 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 090d0e1bdb5..b2e2ed33739 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -375,6 +375,7 @@ type ClientConn struct { doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool + closedOnIdle bool // true if conn was closed for idleness seenSettings bool // true if we've seen a settings frame, false otherwise seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back @@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) // // This avoids a situation where an error early in a connection's lifetime // goes unreported. - if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { st.canTakeNewRequest = true } @@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -2434,9 +2438,12 @@ func (rl *clientConnReadLoop) cleanup() { // This avoids a situation where new connections are constantly created, // added to the pool, fail, and are removed from the pool, without any error // being surfaced to the user. 
- const unusedWaitTime = 5 * time.Second + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } idleTime := cc.t.now().Sub(cc.lastActive) - if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go index 2459d069f73..51121a3d520 100644 --- a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -37,7 +37,7 @@ type Config struct { // URL. This is a constant specific to each server. TokenURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // EndpointParams specifies additional parameters for requests to the token endpoint. diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go index ca27c2e98c9..55d59999e0e 100644 --- a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go @@ -28,7 +28,7 @@ import ( // AwsSecurityCredentials models AWS security credentials. type AwsSecurityCredentials struct { - // AccessKeyId is the AWS Access Key ID - Required. + // AccessKeyID is the AWS Access Key ID - Required. AccessKeyID string `json:"AccessKeyID"` // SecretAccessKey is the AWS Secret Access Key - Required. SecretAccessKey string `json:"SecretAccessKey"` diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go index 6c81a68728e..ee34924e301 100644 --- a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -329,7 +329,7 @@ type SubjectTokenSupplier interface { type AwsSecurityCredentialsSupplier interface { // AwsRegion should return the AWS region or an error. AwsRegion(ctx context.Context, options SupplierOptions) (string, error) - // GetAwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. + // AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. // The external account token source does not cache the returned security credentials, so caching // logic should be implemented in the supplier to prevent multiple requests for the same security credentials. AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error) diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b80a..74f052aa9fa 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. 
Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2c9..be8c0020701 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf6335..3ca814f54d4 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. 
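
// For illustration: a hedged, Windows-only sketch of the safe-loading advice
// in the doc comments above. NewLazySystemDLL resolves the DLL from the
// system directory, sidestepping preloading attacks; the procedure name is
// an assumption made for this example.

//go:build windows

package example

import "golang.org/x/sys/windows"

func uptimeMillis() uint64 {
	k32 := windows.NewLazySystemDLL("kernel32.dll")
	proc := k32.NewProc("GetTickCount64")
	ms, _, _ := proc.Call() // milliseconds since boot
	return uint64(ms)
}
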
func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 870271ed51f..0458b4f9c43 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -322,6 +322,7 @@ type jsonPackage struct { ImportPath string Dir string Name string + Target string Export string GoFiles []string CompiledGoFiles []string @@ -506,6 +507,7 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse Name: p.Name, ID: p.ImportPath, Dir: p.Dir, + Target: p.Target, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), @@ -811,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } + if cfg.Mode&NeedTarget != 0 { + addFields("Target") + } return "-json=" + strings.Join(fields, ",") } diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 969da4c263c..69eec9f44dd 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -27,6 +27,7 @@ var modes = [...]struct { {NeedModule, "NeedModule"}, {NeedEmbedFiles, "NeedEmbedFiles"}, {NeedEmbedPatterns, "NeedEmbedPatterns"}, + {NeedTarget, "NeedTarget"}, } func (mode LoadMode) String() string { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 9dedf9777dc..0147d9080aa 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -118,6 +118,9 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + // NeedTarget adds Target. + NeedTarget + // Be sure to update loadmode_string.go when adding new items! ) @@ -479,6 +482,10 @@ type Package struct { // information for the package as provided by the build system. ExportFile string + // Target is the absolute install path of the .a file, for libraries, + // and of the executable file, for binaries. + Target string + // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. Imports map[string]*Package diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 8d824f7140f..93b3090c687 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -2,30 +2,35 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to any values. -package typeutil // import "golang.org/x/tools/go/types/typeutil" +// Package typeutil defines various utilities for types, such as [Map], +// a hash table that maps [types.Type] to any value. +package typeutil import ( "bytes" "fmt" "go/types" - "reflect" + "hash/maphash" + "unsafe" "golang.org/x/tools/internal/typeparams" ) // Map is a hash-table-based mapping from types (types.Type) to -// arbitrary any values. 
The concrete types that implement +// arbitrary values. The concrete types that implement // the Type interface are pointers. Since they are not canonicalized, // == cannot be used to check for equivalence, and thus we cannot // simply use a Go map. // // Just as with map[K]V, a nil *Map is a valid empty map. // -// Not thread-safe. +// Read-only map operations ([Map.At], [Map.Len], and so on) may +// safely be called concurrently. +// +// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 +// and 69559, if the latter proposals for a generic hash-map type and +// a types.Hash function are accepted. type Map struct { - hasher Hasher // shared by many Maps table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused length int // number of map entries } @@ -36,35 +41,17 @@ type entry struct { value any } -// SetHasher sets the hasher used by Map. -// -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. +// SetHasher has no effect. // -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} +// It is a relic of an optimization that is no longer profitable. Do +// not use [Hasher], [MakeHasher], or [SetHasher] in new code. +func (m *Map) SetHasher(Hasher) {} // Delete removes the entry with the given key, if any. // It returns true if the entry was found. func (m *Map) Delete(key types.Type) bool { if m != nil && m.table != nil { - hash := m.hasher.Hash(key) + hash := hash(key) bucket := m.table[hash] for i, e := range bucket { if e.key != nil && types.Identical(key, e.key) { @@ -83,7 +70,7 @@ func (m *Map) Delete(key types.Type) bool { // The result is nil if the entry is not present. func (m *Map) At(key types.Type) any { if m != nil && m.table != nil { - for _, e := range m.table[m.hasher.Hash(key)] { + for _, e := range m.table[hash(key)] { if e.key != nil && types.Identical(key, e.key) { return e.value } @@ -96,7 +83,7 @@ func (m *Map) At(key types.Type) any { // and returns the previous entry, if any. func (m *Map) Set(key types.Type, value any) (prev any) { if m.table != nil { - hash := m.hasher.Hash(key) + hash := hash(key) bucket := m.table[hash] var hole *entry for i, e := range bucket { @@ -115,10 +102,7 @@ func (m *Map) Set(key types.Type, value any) (prev any) { m.table[hash] = append(bucket, entry{key, value}) } } else { - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) + hash := hash(key) m.table = map[uint32][]entry{hash: {entry{key, value}}} } @@ -195,53 +179,35 @@ func (m *Map) KeysString() string { return m.toString(false) } -//////////////////////////////////////////////////////////////////////// -// Hasher - -// A Hasher maps each type to its hash value. 
-// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. -type Hasher struct { - memo map[types.Type]uint32 - - // ptrMap records pointer identity. - ptrMap map[any]uint32 - - // sigTParams holds type parameters from the signature being hashed. - // Signatures are considered identical modulo renaming of type parameters, so - // within the scope of a signature type the identity of the signature's type - // parameters is just their index. - // - // Since the language does not currently support referring to uninstantiated - // generic types or functions, and instantiated signatures do not have type - // parameter lists, we should never encounter a second non-empty type - // parameter list when hashing a generic signature. - sigTParams *types.TypeParamList -} +// -- Hasher -- -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{ - memo: make(map[types.Type]uint32), - ptrMap: make(map[any]uint32), - sigTParams: nil, - } +// hash returns the hash of type t. +// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. +func hash(t types.Type) uint32 { + return theHasher.Hash(t) } +// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. +// Hashers are stateless, and all are equivalent. +type Hasher struct{} + +var theHasher Hasher + +// MakeHasher returns Hasher{}. +// Hashers are stateless; all are equivalent. +func MakeHasher() Hasher { return theHasher } + // Hash computes a hash value for the given type t such that // Identical(t, t') => Hash(t) == Hash(t'). func (h Hasher) Hash(t types.Type) uint32 { - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash + return hasher{inGenericSig: false}.hash(t) } +// hasher holds the state of a single Hash traversal: whether we are +// inside the signature of a generic function; this is used to +// optimize [hasher.hashTypeParam]. +type hasher struct{ inGenericSig bool } + // hashString computes the Fowler–Noll–Vo hash of s. func hashString(s string) uint32 { var h uint32 @@ -252,21 +218,21 @@ func hashString(s string) uint32 { return h } -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { +// hash computes the hash of t. +func (h hasher) hash(t types.Type) uint32 { // See Identical for rationale. switch t := t.(type) { case *types.Basic: return uint32(t.Kind()) case *types.Alias: - return h.Hash(types.Unalias(t)) + return h.hash(types.Unalias(t)) case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) + return 9049 + 2*h.hash(t.Elem()) case *types.Struct: var hash uint32 = 9059 @@ -277,12 +243,12 @@ func (h Hasher) hashFor(t types.Type) uint32 { } hash += hashString(t.Tag(i)) hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.Hash(f.Type()) + hash += h.hash(f.Type()) } return hash case *types.Pointer: - return 9067 + 2*h.Hash(t.Elem()) + return 9067 + 2*h.hash(t.Elem()) case *types.Signature: var hash uint32 = 9091 @@ -290,33 +256,11 @@ func (h Hasher) hashFor(t types.Type) uint32 { hash *= 8863 } - // Use a separate hasher for types inside of the signature, where type - // parameter identity is modified to be (index, constraint). 
We must use a - // new memo for this hasher as type identity may be affected by this - // masking. For example, in func[T any](*T), the identity of *T depends on - // whether we are mapping the argument in isolation, or recursively as part - // of hashing the signature. - // - // We should never encounter a generic signature while hashing another - // generic signature, but defensively set sigTParams only if h.mask is - // unset. tparams := t.TypeParams() - if h.sigTParams == nil && tparams.Len() != 0 { - h = Hasher{ - // There may be something more efficient than discarding the existing - // memo, but it would require detecting whether types are 'tainted' by - // references to type parameters. - memo: make(map[types.Type]uint32), - // Re-using ptrMap ensures that pointer identity is preserved in this - // hasher. - ptrMap: h.ptrMap, - sigTParams: tparams, - } - } - - for i := 0; i < tparams.Len(); i++ { + for i := range tparams.Len() { + h.inGenericSig = true tparam := tparams.At(i) - hash += 7 * h.Hash(tparam.Constraint()) + hash += 7 * h.hash(tparam.Constraint()) } return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) @@ -350,17 +294,17 @@ func (h Hasher) hashFor(t types.Type) uint32 { return hash case *types.Map: - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) case *types.Named: - hash := h.hashPtr(t.Obj()) + hash := h.hashTypeName(t.Obj()) targs := t.TypeArgs() for i := 0; i < targs.Len(); i++ { targ := targs.At(i) - hash += 2 * h.Hash(targ) + hash += 2 * h.hash(targ) } return hash @@ -374,17 +318,17 @@ func (h Hasher) hashFor(t types.Type) uint32 { panic(fmt.Sprintf("%T: %v", t, t)) } -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { +func (h hasher) hashTuple(tuple *types.Tuple) uint32 { // See go/types.identicalTypes for rationale. n := tuple.Len() hash := 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) + for i := range n { + hash += 3 * h.hash(tuple.At(i).Type()) } return hash } -func (h Hasher) hashUnion(t *types.Union) uint32 { +func (h hasher) hashUnion(t *types.Union) uint32 { // Hash type restrictions. terms, err := typeparams.UnionTermSet(t) // if err != nil t has invalid type restrictions. Fall back on a non-zero @@ -395,11 +339,11 @@ func (h Hasher) hashUnion(t *types.Union) uint32 { return h.hashTermSet(terms) } -func (h Hasher) hashTermSet(terms []*types.Term) uint32 { +func (h hasher) hashTermSet(terms []*types.Term) uint32 { hash := 9157 + 2*uint32(len(terms)) for _, term := range terms { // term order is not significant. - termHash := h.Hash(term.Type()) + termHash := h.hash(term.Type()) if term.Tilde() { termHash *= 9161 } @@ -408,36 +352,42 @@ func (h Hasher) hashTermSet(terms []*types.Term) uint32 { return hash } -// hashTypeParam returns a hash of the type parameter t, with a hash value -// depending on whether t is contained in h.sigTParams. -// -// If h.sigTParams is set and contains t, then we are in the process of hashing -// a signature, and the hash value of t must depend only on t's index and -// constraint: signatures are considered identical modulo type parameter -// renaming. To avoid infinite recursion, we only hash the type parameter -// index, and rely on types.Identical to handle signatures where constraints -// are not identical. -// -// Otherwise the hash of t depends only on t's pointer identity. 
-func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
-	if h.sigTParams != nil {
-		i := t.Index()
-		if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
-			return 9173 + 3*uint32(i)
-		}
+// hashTypeParam returns the hash of a type parameter.
+func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
+	// Within the signature of a generic function, TypeParams are
+	// identical if they have the same index and constraint, so we
+	// hash them based on index.
+	//
+	// When we are outside a generic function, free TypeParams are
+	// identical iff they are the same object, so we can use a
+	// more discriminating hash consistent with object identity.
+	// This optimization saves [Map] about 4% when hashing all the
+	// types.Info.Types in the forward closure of net/http.
+	if !h.inGenericSig {
+		// Optimization: outside a generic function signature,
+		// use a more discriminating hash consistent with object identity.
+		return h.hashTypeName(t.Obj())
 	}
-	return h.hashPtr(t.Obj())
+	return 9173 + 3*uint32(t.Index())
 }
 
-// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
-// pointers values are not dependent on the GC.
-func (h Hasher) hashPtr(ptr any) uint32 {
-	if hash, ok := h.ptrMap[ptr]; ok {
-		return hash
-	}
-	hash := uint32(reflect.ValueOf(ptr).Pointer())
-	h.ptrMap[ptr] = hash
-	return hash
+var theSeed = maphash.MakeSeed()
+
+// hashTypeName hashes the pointer of tname.
+func (hasher) hashTypeName(tname *types.TypeName) uint32 {
+	// Since types.Identical uses == to compare TypeNames,
+	// the Hash function uses maphash.Comparable.
+	// TODO(adonovan): or will, when it becomes available in go1.24.
+	// In the meantime we use the pointer's numeric value.
+	//
+	//	hash := maphash.Comparable(theSeed, tname)
+	//
+	// (Another approach would be to hash the name and package
+	// path, and whether or not it is a package-level typename. It
+	// is rare for a package to define multiple local types with
+	// the same name.)
+	hash := uintptr(unsafe.Pointer(tname))
+	return uint32(hash ^ (hash >> 32))
 }
 
 // shallowHash computes a hash of t without looking at any of its
@@ -454,7 +404,7 @@ func (h Hasher) hashPtr(ptr any) uint32 {
 // include m itself; there is no mention of the named type X that
 // might help us break the cycle.
 // (See comment in go/types.identical, case *Interface, for more.)
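
// For illustration: a hedged usage sketch for typeutil.Map, whose hashing
// internals change above. Two separately constructed types are never == but
// may be types.Identical, which is exactly the case the Map handles.
package example

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func demo() {
	var m typeutil.Map // the zero value is a valid empty map

	k1 := types.NewSlice(types.Typ[types.Int]) // []int
	k2 := types.NewSlice(types.Typ[types.Int]) // a distinct but Identical []int

	m.Set(k1, "slice of int")
	fmt.Println(k1 == k2) // false: distinct pointers
	fmt.Println(m.At(k2)) // "slice of int": lookup uses types.Identical
}
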
-func (h Hasher) shallowHash(t types.Type) uint32 { +func (h hasher) shallowHash(t types.Type) uint32 { // t is the type of an interface method (Signature), // its params or results (Tuples), or their immediate // elements (mostly Slice, Pointer, Basic, Named), @@ -475,7 +425,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 { case *types.Tuple: n := t.Len() hash := 9137 + 2*uint32(n) - for i := 0; i < n; i++ { + for i := range n { hash += 53471161 * h.shallowHash(t.At(i).Type()) } return hash @@ -508,10 +458,10 @@ func (h Hasher) shallowHash(t types.Type) uint32 { return 9127 case *types.Named: - return h.hashPtr(t.Obj()) + return h.hashTypeName(t.Obj()) case *types.TypeParam: - return h.hashPtr(t.Obj()) + return h.hashTypeParam(t) } panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index 6f5d8a21391..5662a311dac 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,52 +2,183 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. +// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. +// This file also additionally implements FindExportData for gcexportdata.NewReader. package gcimporter import ( "bufio" + "bytes" + "errors" "fmt" + "go/build" "io" - "strconv" + "os" + "os/exec" + "path/filepath" "strings" + "sync" ) -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying cmd/compile created archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. +// This returns the length of the export data in bytes. +// +// This function is needed by [gcexportdata.Read], which must +// accept inputs produced by the last two releases of cmd/compile, +// plus tip. +func FindExportData(r *bufio.Reader) (size int64, err error) { + arsize, err := FindPackageDefinition(r) + if err != nil { + return + } + size = int64(arsize) + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + size -= int64(len(objapi)) + for _, h := range headers { + size -= int64(len(h)) + } + + // Check for the binary export data section header "$$B\n". + // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading + line, err := r.ReadSlice('\n') if err != nil { return } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) + hdr := string(line) + if hdr != "$$B\n" { + err = fmt.Errorf("unknown export data header: %q", hdr) + return } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") + size -= int64(len(hdr)) + + // For files with a binary export data header "$$B\n", + // these are always terminated by an end-of-section marker "\n$$\n". + // So the last bytes must always be this constant. 
+ // + // The end-of-section marker is not a part of the export data itself. + // Do not include these in size. + // + // It would be nice to have sanity check that the final bytes after + // the export data are indeed the end-of-section marker. The split + // of gcexportdata.NewReader and gcexportdata.Read make checking this + // ugly so gcimporter gives up enforcing this. The compiler and go/types + // importer do enforce this, which seems good enough. + const endofsection = "\n$$\n" + size -= int64(len(endofsection)) + + if size < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) return } - name = strings.TrimSpace(string(hdr[:16])) + return } -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying cmd/compile created archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. -// The size result is the length of the export data in bytes. +// ReadUnified reads the contents of the unified export data from a reader r +// that contains the contents of a GC-created archive file. // -// This function is needed by [gcexportdata.Read], which must -// accept inputs produced by the last two releases of cmd/compile, -// plus tip. -func FindExportData(r *bufio.Reader) (size int64, err error) { +// On success, the reader will be positioned after the end-of-section marker "\n$$\n". +// +// Supported GC-created archive files have 4 layers of nesting: +// - An archive file containing a package definition file. +// - The package definition file contains headers followed by a data section. +// Headers are lines (≤ 4kb) that do not start with "$$". +// - The data section starts with "$$B\n" followed by export data followed +// by an end of section marker "\n$$\n". (The section start "$$\n" is no +// longer supported.) +// - The export data starts with a format byte ('u') followed by the in +// the given format. (See ReadExportDataHeader for older formats.) +// +// Putting this together, the bytes in a GC-created archive files are expected +// to look like the following. +// See cmd/internal/archive for more details on ar file headers. +// +// | \n | ar file signature +// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size. +// | go object <...>\n | objabi header +// | \n | other headers such as build id +// | $$B\n | binary format marker +// | u\n | unified export +// | $$\n | end-of-section marker +// | [optional padding] | padding byte (0x0A) if size is odd +// | [ar file header] | other ar files +// | [ar file data] | +func ReadUnified(r *bufio.Reader) (data []byte, err error) { + // We historically guaranteed headers at the default buffer size (4096) work. + // This ensures we can use ReadSlice throughout. + const minBufferSize = 4096 + r = bufio.NewReaderSize(r, minBufferSize) + + size, err := FindPackageDefinition(r) + if err != nil { + return + } + n := size + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + n -= len(objapi) + for _, h := range headers { + n -= len(h) + } + + hdrlen, err := ReadExportDataHeader(r) + if err != nil { + return + } + n -= hdrlen + + // size also includes the end of section marker. Remove that many bytes from the end. 
+ const marker = "\n$$\n" + n -= len(marker) + + if n < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n) + return + } + + // Read n bytes from buf. + data = make([]byte, n) + _, err = io.ReadFull(r, data) + if err != nil { + return + } + + // Check for marker at the end. + var suffix [len(marker)]byte + _, err = io.ReadFull(r, suffix[:]) + if err != nil { + return + } + if s := string(suffix[:]); s != marker { + err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker) + return + } + + return +} + +// FindPackageDefinition positions the reader r at the beginning of a package +// definition file ("__.PKGDEF") within a GC-created archive by reading +// from it, and returns the size of the package definition file in the archive. +// +// The reader must be positioned at the start of the archive file before calling +// this function, and "__.PKGDEF" is assumed to be the first file in the archive. +// +// See cmd/internal/archive for details on the archive format. +func FindPackageDefinition(r *bufio.Reader) (size int, err error) { + // Uses ReadSlice to limit risk of malformed inputs. + // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -61,56 +192,230 @@ func FindExportData(r *bufio.Reader) (size int64, err error) { return } - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { + // package export block should be first + size = readArchiveHeader(r, "__.PKGDEF") + if size <= 0 { + err = fmt.Errorf("not a package file") return } - arsize := size - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } + return +} + +// ReadObjectHeaders reads object headers from the reader. Object headers are +// lines that do not start with an end-of-section marker "$$". The first header +// is the objabi header. On success, the reader will be positioned at the beginning +// of the end-of-section marker. +// +// It returns an error if any header does not fit in r.Size() bytes. +func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) { + // line is a temporary buffer for headers. + // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs. + var line []byte - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. + // objapi header should be the first line if line, err = r.ReadSlice('\n'); err != nil { err = fmt.Errorf("can't find export data (%v)", err) return } - size -= int64(len(line)) + objapi = string(line) - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") + // objapi header begins with "go object ". + if !strings.HasPrefix(objapi, "go object ") { + err = fmt.Errorf("not a go object file: %s", objapi) return } - // Skip over object headers to get to the export data section header "$$B\n". - // Object headers are lines that do not start with '$'. 
- for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) + // process remaining object header lines + for { + // check for an end-of-section marker "$$" + line, err = r.Peek(2) + if err != nil { + return + } + if string(line) == "$$" { + return // stop + } + + // read next header + line, err = r.ReadSlice('\n') + if err != nil { return } - size -= int64(len(line)) + headers = append(headers, string(line)) } +} -// Check for the binary export data section header "$$B\n". - hdr := string(line) - if hdr != "$$B\n" { - err = fmt.Errorf("unknown export data header: %q", hdr) +// ReadExportDataHeader reads the export data header and format from r. +// It returns the number of bytes read, or an error if the format is no longer +// supported or the read failed. +// +// The only currently supported format is binary export data in the +// unified export format. +func ReadExportDataHeader(r *bufio.Reader) (n int, err error) { + // Read export data header. + line, err := r.ReadSlice('\n') + if err != nil { return } - // TODO(taking): Remove end-of-section marker "\n$$\n" from size. - if size < 0 { - err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) + hdr := string(line) + switch hdr { + case "$$\n": + err = fmt.Errorf("old textual export format no longer supported (recompile package)") + return + + case "$$B\n": + var format byte + format, err = r.ReadByte() + if err != nil { + return + } + // The unified export format starts with a 'u'. + switch format { + case 'u': + default: + // Older, no-longer-supported export formats include: + // the indexed export format, which started with an 'i'; and + // the older binary export format, which started with a 'c', + // 'd', or 'v' (from "version"). + err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format) + return + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) return } + n = len(hdr) + 1 // the + 1 is for the format byte 'u' return } + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// +// FindPkg is only used in tests within x/tools. +func FindPkg(path, srcDir string) (filename, id string, err error) { + // TODO(taking): Move internal/exportdata.FindPkg into its own file, + // and then move this copy into a _test package. + if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && !f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +var exportMap sync.Map // package dir → func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +// +// lookupGorootExport is only used in tests within x/tools. 
+func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index dbbca860432..3dbd21d1b90 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" - "bytes" "fmt" - "go/build" "go/token" "go/types" "io" "os" - "os/exec" - "path/filepath" - "strings" - "sync" ) const ( @@ -45,127 +39,14 @@ const ( trace = false ) -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
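// A hedged usage sketch for the new FindPkg signature above (illustrative
// only; the import path and directory are hypothetical):
//
//	filename, id, err := FindPkg("fmt", ".")
//	if err != nil { /* not found; err wraps the underlying cause */ }
//	// filename locates the export data (possibly via the build cache for
//	// GOROOT packages); id is the canonical import path.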
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. // -// TODO(taking): Import is only used in tests. Move to gcimporter_test. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +// Import is only used in tests. +func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser - var filename, id string + var id string if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -184,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } rc = f } else { - filename, id = FindPkg(path, srcDir) + var filename string + filename, id, err = FindPkg(path, srcDir) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } - return nil, fmt.Errorf("can't find import: %q", id) + return nil, err } // no need to re-import if the package was imported completely before @@ -212,54 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var size int64 buf := bufio.NewReader(rc) - if size, err = FindExportData(buf); err != nil { - return - } - - var data []byte - data, err = io.ReadAll(buf) + data, err := ReadUnified(buf) if err != nil { + err = fmt.Errorf("import %q: %v", path, err) return } - if len(data) == 0 { - return nil, fmt.Errorf("no data to load a package from for path %s", id) - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. - switch data[0] { - case 'v', 'c', 'd': - // binary: emitted by cmd/compile till go1.10; obsolete. - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': - // indexed: emitted by cmd/compile till go1.19; - // now used only for serializing go/types. - // See https://github.com/golang/go/issues/69491. 
- _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err + // unified: emitted by cmd/compile since go1.20. + _, pkg, err = UImportData(fset, packages, data, id) - case 'u': - // unified: emitted by cmd/compile since go1.20. - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } + return } - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index e260c0e8dbf..69b1d697cbe 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -5,8 +5,6 @@ // Indexed package import. // See iexport.go for the export data format. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. - package gcimporter import ( @@ -1111,3 +1109,9 @@ func (r *importReader) byte() byte { } return x } + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go new file mode 100644 index 00000000000..4af810dc412 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "bufio" + "io" + "strconv" + "strings" +) + +// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. 
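// For orientation (illustrative only): the helper below parses the standard
// 60-byte System V ar member header, whose fields lay out as name[0:16],
// mtime[16:28], uid[28:34], gid[34:40], mode[40:48], size[48:58],
// magic[58:60]. Only the name and the decimal size are used here; e.g. a
// hypothetical header
//
//	"__.PKGDEF       1234567890  0     0     644     88        `\n"
//
// yields name "__.PKGDEF" and size 88.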
+func readArchiveHeader(b *bufio.Reader, name string) int { + // architecture-independent object file output + const HeaderSize = 60 + + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 1db408613c9..6cdab448eca 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,7 +11,6 @@ import ( "go/token" "go/types" "sort" - "strings" "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" @@ -71,7 +70,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -266,7 +264,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { - case "": + // cmd/compile emits path="main" for main packages because + // that's the linker symbol prefix it used; but we need + // the package's path as it would be reported by go list, + // hence "main" below. + // See test at go/packages.TestMainPackagePathInModeTypes. + case "", "main": path = r.p.PkgPath() case "builtin": return nil // universe diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index cdaac9ab34d..9f0b871ff6b 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -268,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{ {"ErrTooLarge", Var, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, + {"FieldsFuncSeq", Func, 24}, + {"FieldsSeq", Func, 24}, {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -280,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, + {"Lines", Func, 24}, {"Map", Func, 0}, {"MinRead", Const, 0}, {"NewBuffer", Func, 0}, @@ -293,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, + {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, + {"SplitSeq", Func, 24}, {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -535,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{ {"NewCTR", Func, 0}, {"NewGCM", Func, 2}, {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithRandomNonce", Func, 24}, {"NewGCMWithTagSize", Func, 11}, {"NewOFB", Func, 0}, {"Stream", Type, 0}, @@ -673,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{ {"Unmarshal", Func, 0}, {"UnmarshalCompressed", Func, 15}, }, + "crypto/fips140": { + {"Enabled", Func, 24}, + }, + "crypto/hkdf": { + {"Expand", Func, 24}, + {"Extract", Func, 24}, + {"Key", Func, 24}, + }, "crypto/hmac": { {"Equal", Func, 1}, {"New", Func, 0}, @@ -683,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{ {"Size", Const, 0}, {"Sum", Func, 2}, }, + "crypto/mlkem": { + {"(*DecapsulationKey1024).Bytes", Method, 24}, + {"(*DecapsulationKey1024).Decapsulate", Method, 24}, + 
{"(*DecapsulationKey1024).EncapsulationKey", Method, 24}, + {"(*DecapsulationKey768).Bytes", Method, 24}, + {"(*DecapsulationKey768).Decapsulate", Method, 24}, + {"(*DecapsulationKey768).EncapsulationKey", Method, 24}, + {"(*EncapsulationKey1024).Bytes", Method, 24}, + {"(*EncapsulationKey1024).Encapsulate", Method, 24}, + {"(*EncapsulationKey768).Bytes", Method, 24}, + {"(*EncapsulationKey768).Encapsulate", Method, 24}, + {"CiphertextSize1024", Const, 24}, + {"CiphertextSize768", Const, 24}, + {"DecapsulationKey1024", Type, 24}, + {"DecapsulationKey768", Type, 24}, + {"EncapsulationKey1024", Type, 24}, + {"EncapsulationKey768", Type, 24}, + {"EncapsulationKeySize1024", Const, 24}, + {"EncapsulationKeySize768", Const, 24}, + {"GenerateKey1024", Func, 24}, + {"GenerateKey768", Func, 24}, + {"NewDecapsulationKey1024", Func, 24}, + {"NewDecapsulationKey768", Func, 24}, + {"NewEncapsulationKey1024", Func, 24}, + {"NewEncapsulationKey768", Func, 24}, + {"SeedSize", Const, 24}, + {"SharedKeySize", Const, 24}, + }, + "crypto/pbkdf2": { + {"Key", Func, 24}, + }, "crypto/rand": { {"Int", Func, 0}, {"Prime", Func, 0}, {"Read", Func, 0}, {"Reader", Var, 0}, + {"Text", Func, 24}, }, "crypto/rc4": { {"(*Cipher).Reset", Method, 0}, @@ -766,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{ {"Sum224", Func, 2}, {"Sum256", Func, 2}, }, + "crypto/sha3": { + {"(*SHA3).AppendBinary", Method, 24}, + {"(*SHA3).BlockSize", Method, 24}, + {"(*SHA3).MarshalBinary", Method, 24}, + {"(*SHA3).Reset", Method, 24}, + {"(*SHA3).Size", Method, 24}, + {"(*SHA3).Sum", Method, 24}, + {"(*SHA3).UnmarshalBinary", Method, 24}, + {"(*SHA3).Write", Method, 24}, + {"(*SHAKE).AppendBinary", Method, 24}, + {"(*SHAKE).BlockSize", Method, 24}, + {"(*SHAKE).MarshalBinary", Method, 24}, + {"(*SHAKE).Read", Method, 24}, + {"(*SHAKE).Reset", Method, 24}, + {"(*SHAKE).UnmarshalBinary", Method, 24}, + {"(*SHAKE).Write", Method, 24}, + {"New224", Func, 24}, + {"New256", Func, 24}, + {"New384", Func, 24}, + {"New512", Func, 24}, + {"NewCSHAKE128", Func, 24}, + {"NewCSHAKE256", Func, 24}, + {"NewSHAKE128", Func, 24}, + {"NewSHAKE256", Func, 24}, + {"SHA3", Type, 24}, + {"SHAKE", Type, 24}, + {"Sum224", Func, 24}, + {"Sum256", Func, 24}, + {"Sum384", Func, 24}, + {"Sum512", Func, 24}, + {"SumSHAKE128", Func, 24}, + {"SumSHAKE256", Func, 24}, + }, "crypto/sha512": { {"BlockSize", Const, 0}, {"New", Func, 0}, @@ -788,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConstantTimeEq", Func, 0}, {"ConstantTimeLessOrEq", Func, 2}, {"ConstantTimeSelect", Func, 0}, + {"WithDataIndependentTiming", Func, 24}, {"XORBytes", Func, 20}, }, "crypto/tls": { @@ -864,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo", Type, 4}, {"ClientHelloInfo.CipherSuites", Field, 4}, {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.Extensions", Field, 24}, {"ClientHelloInfo.ServerName", Field, 4}, {"ClientHelloInfo.SignatureSchemes", Field, 8}, {"ClientHelloInfo.SupportedCurves", Field, 4}, @@ -881,6 +962,7 @@ var PackageSymbols = map[string][]Symbol{ {"Config.CurvePreferences", Field, 3}, {"Config.DynamicRecordSizingDisabled", Field, 7}, {"Config.EncryptedClientHelloConfigList", Field, 23}, + {"Config.EncryptedClientHelloKeys", Field, 24}, {"Config.EncryptedClientHelloRejectionVerify", Field, 23}, {"Config.GetCertificate", Field, 4}, {"Config.GetClientCertificate", Field, 8}, @@ -934,6 +1016,10 @@ var PackageSymbols = map[string][]Symbol{ {"ECHRejectionError", Type, 23}, {"ECHRejectionError.RetryConfigList", Field, 23}, {"Ed25519", 
Const, 13}, + {"EncryptedClientHelloKey", Type, 24}, + {"EncryptedClientHelloKey.Config", Field, 24}, + {"EncryptedClientHelloKey.PrivateKey", Field, 24}, + {"EncryptedClientHelloKey.SendAsRetry", Field, 24}, {"InsecureCipherSuites", Func, 14}, {"Listen", Func, 0}, {"LoadX509KeyPair", Func, 0}, @@ -1032,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{ {"VersionTLS12", Const, 2}, {"VersionTLS13", Const, 12}, {"X25519", Const, 8}, + {"X25519MLKEM768", Const, 24}, {"X509KeyPair", Func, 0}, }, "crypto/x509": { @@ -1056,6 +1143,8 @@ var PackageSymbols = map[string][]Symbol{ {"(ConstraintViolationError).Error", Method, 0}, {"(HostnameError).Error", Method, 0}, {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).AppendBinary", Method, 24}, + {"(OID).AppendText", Method, 24}, {"(OID).Equal", Method, 22}, {"(OID).EqualASN1OID", Method, 22}, {"(OID).MarshalBinary", Method, 23}, @@ -1084,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.Extensions", Field, 2}, {"Certificate.ExtraExtensions", Field, 2}, {"Certificate.IPAddresses", Field, 1}, + {"Certificate.InhibitAnyPolicy", Field, 24}, + {"Certificate.InhibitAnyPolicyZero", Field, 24}, + {"Certificate.InhibitPolicyMapping", Field, 24}, + {"Certificate.InhibitPolicyMappingZero", Field, 24}, {"Certificate.IsCA", Field, 0}, {"Certificate.Issuer", Field, 0}, {"Certificate.IssuingCertificateURL", Field, 2}, @@ -1100,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.PermittedURIDomains", Field, 10}, {"Certificate.Policies", Field, 22}, {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PolicyMappings", Field, 24}, {"Certificate.PublicKey", Field, 0}, {"Certificate.PublicKeyAlgorithm", Field, 0}, {"Certificate.Raw", Field, 0}, @@ -1107,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.RawSubject", Field, 0}, {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.RequireExplicitPolicy", Field, 24}, + {"Certificate.RequireExplicitPolicyZero", Field, 24}, {"Certificate.SerialNumber", Field, 0}, {"Certificate.Signature", Field, 0}, {"Certificate.SignatureAlgorithm", Field, 0}, @@ -1198,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{ {"NameConstraintsWithoutSANs", Const, 10}, {"NameMismatch", Const, 8}, {"NewCertPool", Func, 0}, + {"NoValidChains", Const, 24}, {"NotAuthorizedToSign", Const, 0}, {"OID", Type, 22}, {"OIDFromInts", Func, 22}, @@ -1219,6 +1316,9 @@ var PackageSymbols = map[string][]Symbol{ {"ParsePKCS8PrivateKey", Func, 0}, {"ParsePKIXPublicKey", Func, 0}, {"ParseRevocationList", Func, 19}, + {"PolicyMapping", Type, 24}, + {"PolicyMapping.IssuerDomainPolicy", Field, 24}, + {"PolicyMapping.SubjectDomainPolicy", Field, 24}, {"PublicKeyAlgorithm", Type, 0}, {"PureEd25519", Const, 13}, {"RSA", Const, 0}, @@ -1265,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{ {"UnknownPublicKeyAlgorithm", Const, 0}, {"UnknownSignatureAlgorithm", Const, 0}, {"VerifyOptions", Type, 0}, + {"VerifyOptions.CertificatePolicies", Field, 24}, {"VerifyOptions.CurrentTime", Field, 0}, {"VerifyOptions.DNSName", Field, 0}, {"VerifyOptions.Intermediates", Field, 0}, @@ -1975,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).DynString", Method, 1}, {"(*File).DynValue", Method, 21}, {"(*File).DynamicSymbols", Method, 4}, + {"(*File).DynamicVersionNeeds", Method, 24}, + {"(*File).DynamicVersions", Method, 24}, {"(*File).ImportedLibraries", Method, 0}, {"(*File).ImportedSymbols", Method, 0}, {"(*File).Section", Method, 0}, 
@@ -2240,6 +2343,19 @@ var PackageSymbols = map[string][]Symbol{ {"DynFlag", Type, 0}, {"DynFlag1", Type, 21}, {"DynTag", Type, 0}, + {"DynamicVersion", Type, 24}, + {"DynamicVersion.Deps", Field, 24}, + {"DynamicVersion.Flags", Field, 24}, + {"DynamicVersion.Index", Field, 24}, + {"DynamicVersion.Name", Field, 24}, + {"DynamicVersionDep", Type, 24}, + {"DynamicVersionDep.Dep", Field, 24}, + {"DynamicVersionDep.Flags", Field, 24}, + {"DynamicVersionDep.Index", Field, 24}, + {"DynamicVersionFlag", Type, 24}, + {"DynamicVersionNeed", Type, 24}, + {"DynamicVersionNeed.Name", Field, 24}, + {"DynamicVersionNeed.Needs", Field, 24}, {"EI_ABIVERSION", Const, 0}, {"EI_CLASS", Const, 0}, {"EI_DATA", Const, 0}, @@ -3726,8 +3842,19 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Size", Field, 0}, {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, + {"Symbol.VersionIndex", Field, 24}, + {"Symbol.VersionScope", Field, 24}, + {"SymbolVersionScope", Type, 24}, {"Type", Type, 0}, + {"VER_FLG_BASE", Const, 24}, + {"VER_FLG_INFO", Const, 24}, + {"VER_FLG_WEAK", Const, 24}, {"Version", Type, 0}, + {"VersionScopeGlobal", Const, 24}, + {"VersionScopeHidden", Const, 24}, + {"VersionScopeLocal", Const, 24}, + {"VersionScopeNone", Const, 24}, + {"VersionScopeSpecific", Const, 24}, }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -4453,8 +4580,10 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16}, }, "encoding": { + {"BinaryAppender", Type, 24}, {"BinaryMarshaler", Type, 2}, {"BinaryUnmarshaler", Type, 2}, + {"TextAppender", Type, 24}, {"TextMarshaler", Type, 2}, {"TextUnmarshaler", Type, 2}, }, @@ -5984,13 +6113,16 @@ var PackageSymbols = map[string][]Symbol{ {"(*Interface).Complete", Method, 5}, {"(*Interface).Embedded", Method, 5}, {"(*Interface).EmbeddedType", Method, 11}, + {"(*Interface).EmbeddedTypes", Method, 24}, {"(*Interface).Empty", Method, 5}, {"(*Interface).ExplicitMethod", Method, 5}, + {"(*Interface).ExplicitMethods", Method, 24}, {"(*Interface).IsComparable", Method, 18}, {"(*Interface).IsImplicit", Method, 18}, {"(*Interface).IsMethodSet", Method, 18}, {"(*Interface).MarkImplicit", Method, 18}, {"(*Interface).Method", Method, 5}, + {"(*Interface).Methods", Method, 24}, {"(*Interface).NumEmbeddeds", Method, 5}, {"(*Interface).NumExplicitMethods", Method, 5}, {"(*Interface).NumMethods", Method, 5}, @@ -6011,9 +6143,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*MethodSet).At", Method, 5}, {"(*MethodSet).Len", Method, 5}, {"(*MethodSet).Lookup", Method, 5}, + {"(*MethodSet).Methods", Method, 24}, {"(*MethodSet).String", Method, 5}, {"(*Named).AddMethod", Method, 5}, {"(*Named).Method", Method, 5}, + {"(*Named).Methods", Method, 24}, {"(*Named).NumMethods", Method, 5}, {"(*Named).Obj", Method, 5}, {"(*Named).Origin", Method, 18}, @@ -6054,6 +6188,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).String", Method, 5}, {"(*Pointer).Underlying", Method, 5}, {"(*Scope).Child", Method, 5}, + {"(*Scope).Children", Method, 24}, {"(*Scope).Contains", Method, 5}, {"(*Scope).End", Method, 5}, {"(*Scope).Innermost", Method, 5}, @@ -6089,6 +6224,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*StdSizes).Offsetsof", Method, 5}, {"(*StdSizes).Sizeof", Method, 5}, {"(*Struct).Field", Method, 5}, + {"(*Struct).Fields", Method, 24}, {"(*Struct).NumFields", Method, 5}, {"(*Struct).String", Method, 5}, {"(*Struct).Tag", Method, 5}, @@ -6100,8 +6236,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Tuple).Len", Method, 5}, {"(*Tuple).String", Method, 5}, 
{"(*Tuple).Underlying", Method, 5}, + {"(*Tuple).Variables", Method, 24}, {"(*TypeList).At", Method, 18}, {"(*TypeList).Len", Method, 18}, + {"(*TypeList).Types", Method, 24}, {"(*TypeName).Exported", Method, 5}, {"(*TypeName).Id", Method, 5}, {"(*TypeName).IsAlias", Method, 9}, @@ -6119,9 +6257,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeParam).Underlying", Method, 18}, {"(*TypeParamList).At", Method, 18}, {"(*TypeParamList).Len", Method, 18}, + {"(*TypeParamList).TypeParams", Method, 24}, {"(*Union).Len", Method, 18}, {"(*Union).String", Method, 18}, {"(*Union).Term", Method, 18}, + {"(*Union).Terms", Method, 24}, {"(*Union).Underlying", Method, 18}, {"(*Var).Anonymous", Method, 5}, {"(*Var).Embedded", Method, 11}, @@ -6392,10 +6532,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*Hash).WriteByte", Method, 14}, {"(*Hash).WriteString", Method, 14}, {"Bytes", Func, 19}, + {"Comparable", Func, 24}, {"Hash", Type, 14}, {"MakeSeed", Func, 14}, {"Seed", Type, 14}, {"String", Func, 19}, + {"WriteComparable", Func, 24}, }, "html": { {"EscapeString", Func, 0}, @@ -7082,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*JSONHandler).WithGroup", Method, 21}, {"(*Level).UnmarshalJSON", Method, 21}, {"(*Level).UnmarshalText", Method, 21}, + {"(*LevelVar).AppendText", Method, 24}, {"(*LevelVar).Level", Method, 21}, {"(*LevelVar).MarshalText", Method, 21}, {"(*LevelVar).Set", Method, 21}, @@ -7110,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Attr).Equal", Method, 21}, {"(Attr).String", Method, 21}, {"(Kind).String", Method, 21}, + {"(Level).AppendText", Method, 24}, {"(Level).Level", Method, 21}, {"(Level).MarshalJSON", Method, 21}, {"(Level).MarshalText", Method, 21}, @@ -7140,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{ {"Debug", Func, 21}, {"DebugContext", Func, 21}, {"Default", Func, 21}, + {"DiscardHandler", Var, 24}, {"Duration", Func, 21}, {"DurationValue", Func, 21}, {"Error", Func, 21}, @@ -7375,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Float).Acc", Method, 5}, {"(*Float).Add", Method, 5}, {"(*Float).Append", Method, 5}, + {"(*Float).AppendText", Method, 24}, {"(*Float).Cmp", Method, 5}, {"(*Float).Copy", Method, 5}, {"(*Float).Float32", Method, 5}, @@ -7421,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).And", Method, 0}, {"(*Int).AndNot", Method, 0}, {"(*Int).Append", Method, 6}, + {"(*Int).AppendText", Method, 24}, {"(*Int).Binomial", Method, 0}, {"(*Int).Bit", Method, 0}, {"(*Int).BitLen", Method, 0}, @@ -7477,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).Xor", Method, 0}, {"(*Rat).Abs", Method, 0}, {"(*Rat).Add", Method, 0}, + {"(*Rat).AppendText", Method, 24}, {"(*Rat).Cmp", Method, 0}, {"(*Rat).Denom", Method, 0}, {"(*Rat).Float32", Method, 4}, @@ -7659,11 +7807,13 @@ var PackageSymbols = map[string][]Symbol{ {"Zipf", Type, 0}, }, "math/rand/v2": { + {"(*ChaCha8).AppendBinary", Method, 24}, {"(*ChaCha8).MarshalBinary", Method, 22}, {"(*ChaCha8).Read", Method, 23}, {"(*ChaCha8).Seed", Method, 22}, {"(*ChaCha8).Uint64", Method, 22}, {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).AppendBinary", Method, 24}, {"(*PCG).MarshalBinary", Method, 22}, {"(*PCG).Seed", Method, 22}, {"(*PCG).Uint64", Method, 22}, @@ -7931,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SyscallConn", Method, 10}, {"(Flags).String", Method, 0}, {"(HardwareAddr).String", Method, 0}, + {"(IP).AppendText", Method, 24}, {"(IP).DefaultMask", Method, 0}, {"(IP).Equal", Method, 0}, 
{"(IP).IsGlobalUnicast", Method, 0}, @@ -8131,6 +8282,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*MaxBytesError).Error", Method, 19}, {"(*ProtocolError).Error", Method, 0}, {"(*ProtocolError).Is", Method, 21}, + {"(*Protocols).SetHTTP1", Method, 24}, + {"(*Protocols).SetHTTP2", Method, 24}, + {"(*Protocols).SetUnencryptedHTTP2", Method, 24}, {"(*Request).AddCookie", Method, 0}, {"(*Request).BasicAuth", Method, 4}, {"(*Request).Clone", Method, 13}, @@ -8190,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14}, {"(Header).Write", Method, 0}, {"(Header).WriteSubset", Method, 0}, + {"(Protocols).HTTP1", Method, 24}, + {"(Protocols).HTTP2", Method, 24}, + {"(Protocols).String", Method, 24}, + {"(Protocols).UnencryptedHTTP2", Method, 24}, {"AllowQuerySemicolons", Func, 17}, {"CanonicalHeaderKey", Func, 0}, {"Client", Type, 0}, @@ -8252,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{ {"FileSystem", Type, 0}, {"Flusher", Type, 0}, {"Get", Func, 0}, + {"HTTP2Config", Type, 24}, + {"HTTP2Config.CountError", Field, 24}, + {"HTTP2Config.MaxConcurrentStreams", Field, 24}, + {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24}, + {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24}, + {"HTTP2Config.MaxReadFrameSize", Field, 24}, + {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24}, + {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24}, + {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24}, + {"HTTP2Config.PingTimeout", Field, 24}, + {"HTTP2Config.SendPingTimeout", Field, 24}, + {"HTTP2Config.WriteByteTimeout", Field, 24}, {"Handle", Func, 0}, {"HandleFunc", Func, 0}, {"Handler", Type, 0}, @@ -8292,6 +8462,7 @@ var PackageSymbols = map[string][]Symbol{ {"PostForm", Func, 0}, {"ProtocolError", Type, 0}, {"ProtocolError.ErrorString", Field, 0}, + {"Protocols", Type, 24}, {"ProxyFromEnvironment", Func, 0}, {"ProxyURL", Func, 0}, {"PushOptions", Type, 8}, @@ -8361,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{ {"Server.ConnState", Field, 3}, {"Server.DisableGeneralOptionsHandler", Field, 20}, {"Server.ErrorLog", Field, 3}, + {"Server.HTTP2", Field, 24}, {"Server.Handler", Field, 0}, {"Server.IdleTimeout", Field, 8}, {"Server.MaxHeaderBytes", Field, 0}, + {"Server.Protocols", Field, 24}, {"Server.ReadHeaderTimeout", Field, 8}, {"Server.ReadTimeout", Field, 0}, {"Server.TLSConfig", Field, 0}, @@ -8453,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{ {"Transport.ExpectContinueTimeout", Field, 6}, {"Transport.ForceAttemptHTTP2", Field, 13}, {"Transport.GetProxyConnectHeader", Field, 16}, + {"Transport.HTTP2", Field, 24}, {"Transport.IdleConnTimeout", Field, 7}, {"Transport.MaxConnsPerHost", Field, 11}, {"Transport.MaxIdleConns", Field, 7}, {"Transport.MaxIdleConnsPerHost", Field, 0}, {"Transport.MaxResponseHeaderBytes", Field, 7}, {"Transport.OnProxyConnectResponse", Field, 20}, + {"Transport.Protocols", Field, 24}, {"Transport.Proxy", Field, 0}, {"Transport.ProxyConnectHeader", Field, 8}, {"Transport.ReadBufferSize", Field, 13}, @@ -8646,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*AddrPort).UnmarshalText", Method, 18}, {"(*Prefix).UnmarshalBinary", Method, 18}, {"(*Prefix).UnmarshalText", Method, 18}, + {"(Addr).AppendBinary", Method, 24}, + {"(Addr).AppendText", Method, 24}, {"(Addr).AppendTo", Method, 18}, {"(Addr).As16", Method, 18}, {"(Addr).As4", Method, 18}, @@ -8676,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Addr).WithZone", Method, 18}, {"(Addr).Zone", Method, 18}, {"(AddrPort).Addr", Method, 18}, 
+ {"(AddrPort).AppendBinary", Method, 24}, + {"(AddrPort).AppendText", Method, 24}, {"(AddrPort).AppendTo", Method, 18}, {"(AddrPort).Compare", Method, 22}, {"(AddrPort).IsValid", Method, 18}, @@ -8684,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{ {"(AddrPort).Port", Method, 18}, {"(AddrPort).String", Method, 18}, {"(Prefix).Addr", Method, 18}, + {"(Prefix).AppendBinary", Method, 24}, + {"(Prefix).AppendText", Method, 24}, {"(Prefix).AppendTo", Method, 18}, {"(Prefix).Bits", Method, 18}, {"(Prefix).Contains", Method, 18}, @@ -8868,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Error).Temporary", Method, 6}, {"(*Error).Timeout", Method, 6}, {"(*Error).Unwrap", Method, 13}, + {"(*URL).AppendBinary", Method, 24}, {"(*URL).EscapedFragment", Method, 15}, {"(*URL).EscapedPath", Method, 5}, {"(*URL).Hostname", Method, 8}, @@ -8967,6 +9149,17 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, + {"(*Root).Close", Method, 24}, + {"(*Root).Create", Method, 24}, + {"(*Root).FS", Method, 24}, + {"(*Root).Lstat", Method, 24}, + {"(*Root).Mkdir", Method, 24}, + {"(*Root).Name", Method, 24}, + {"(*Root).Open", Method, 24}, + {"(*Root).OpenFile", Method, 24}, + {"(*Root).OpenRoot", Method, 24}, + {"(*Root).Remove", Method, 24}, + {"(*Root).Stat", Method, 24}, {"(*SyscallError).Error", Method, 0}, {"(*SyscallError).Timeout", Method, 10}, {"(*SyscallError).Unwrap", Method, 13}, @@ -9060,6 +9253,8 @@ var PackageSymbols = map[string][]Symbol{ {"O_WRONLY", Const, 0}, {"Open", Func, 0}, {"OpenFile", Func, 0}, + {"OpenInRoot", Func, 24}, + {"OpenRoot", Func, 24}, {"PathError", Type, 0}, {"PathError.Err", Field, 0}, {"PathError.Op", Field, 0}, @@ -9081,6 +9276,7 @@ var PackageSymbols = map[string][]Symbol{ {"Remove", Func, 0}, {"RemoveAll", Func, 0}, {"Rename", Func, 0}, + {"Root", Type, 24}, {"SEEK_CUR", Const, 0}, {"SEEK_END", Const, 0}, {"SEEK_SET", Const, 0}, @@ -9422,6 +9618,7 @@ var PackageSymbols = map[string][]Symbol{ {"Zero", Func, 0}, }, "regexp": { + {"(*Regexp).AppendText", Method, 24}, {"(*Regexp).Copy", Method, 6}, {"(*Regexp).Expand", Method, 0}, {"(*Regexp).ExpandString", Method, 0}, @@ -9602,6 +9799,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*StackRecord).Stack", Method, 0}, {"(*TypeAssertionError).Error", Method, 0}, {"(*TypeAssertionError).RuntimeError", Method, 0}, + {"(Cleanup).Stop", Method, 24}, + {"AddCleanup", Func, 24}, {"BlockProfile", Func, 1}, {"BlockProfileRecord", Type, 1}, {"BlockProfileRecord.Count", Field, 1}, @@ -9612,6 +9811,7 @@ var PackageSymbols = map[string][]Symbol{ {"Caller", Func, 0}, {"Callers", Func, 0}, {"CallersFrames", Func, 7}, + {"Cleanup", Type, 24}, {"Compiler", Const, 0}, {"Error", Type, 0}, {"Frame", Type, 7}, @@ -9974,6 +10174,8 @@ var PackageSymbols = map[string][]Symbol{ {"EqualFold", Func, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, + {"FieldsFuncSeq", Func, 24}, + {"FieldsSeq", Func, 24}, {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -9986,6 +10188,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, + {"Lines", Func, 24}, {"Map", Func, 0}, {"NewReader", Func, 0}, {"NewReplacer", Func, 0}, @@ -9997,7 +10200,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, + {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, + {"SplitSeq", Func, 24}, 
{"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -16413,7 +16618,9 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0}, }, "testing": { + {"(*B).Chdir", Method, 24}, {"(*B).Cleanup", Method, 14}, + {"(*B).Context", Method, 24}, {"(*B).Elapsed", Method, 20}, {"(*B).Error", Method, 0}, {"(*B).Errorf", Method, 0}, @@ -16425,6 +16632,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).Helper", Method, 9}, {"(*B).Log", Method, 0}, {"(*B).Logf", Method, 0}, + {"(*B).Loop", Method, 24}, {"(*B).Name", Method, 8}, {"(*B).ReportAllocs", Method, 1}, {"(*B).ReportMetric", Method, 13}, @@ -16442,7 +16650,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0}, {"(*B).TempDir", Method, 15}, {"(*F).Add", Method, 18}, + {"(*F).Chdir", Method, 24}, {"(*F).Cleanup", Method, 18}, + {"(*F).Context", Method, 24}, {"(*F).Error", Method, 18}, {"(*F).Errorf", Method, 18}, {"(*F).Fail", Method, 18}, @@ -16463,7 +16673,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18}, {"(*M).Run", Method, 4}, {"(*PB).Next", Method, 3}, + {"(*T).Chdir", Method, 24}, {"(*T).Cleanup", Method, 14}, + {"(*T).Context", Method, 24}, {"(*T).Deadline", Method, 15}, {"(*T).Error", Method, 0}, {"(*T).Errorf", Method, 0}, @@ -16954,7 +17166,9 @@ var PackageSymbols = map[string][]Symbol{ {"(Time).Add", Method, 0}, {"(Time).AddDate", Method, 0}, {"(Time).After", Method, 0}, + {"(Time).AppendBinary", Method, 24}, {"(Time).AppendFormat", Method, 5}, + {"(Time).AppendText", Method, 24}, {"(Time).Before", Method, 0}, {"(Time).Clock", Method, 0}, {"(Time).Compare", Method, 20}, @@ -17428,4 +17642,9 @@ var PackageSymbols = map[string][]Symbol{ {"String", Func, 0}, {"StringData", Func, 0}, }, + "weak": { + {"(Pointer).Value", Method, 24}, + {"Make", Func, 24}, + {"Pointer", Type, 24}, + }, } diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 0b84acc5c7f..cdae2b8e818 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -66,75 +66,3 @@ func IsTypeParam(t types.Type) bool { _, ok := types.Unalias(t).(*types.TypeParam) return ok } - -// GenericAssignableTo is a generalization of types.AssignableTo that -// implements the following rule for uninstantiated generic types: -// -// If V and T are generic named types, then V is considered assignable to T if, -// for every possible instantiation of V[A_1, ..., A_N], the instantiation -// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. -// -// If T has structural constraints, they must be satisfied by V. -// -// For example, consider the following type declarations: -// -// type Interface[T any] interface { -// Accept(T) -// } -// -// type Container[T any] struct { -// Element T -// } -// -// func (c Container[T]) Accept(t T) { c.Element = t } -// -// In this case, GenericAssignableTo reports that instantiations of Container -// are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { - V = types.Unalias(V) - T = types.Unalias(T) - - // If V and T are not both named, or do not have matching non-empty type - // parameter lists, fall back on types.AssignableTo. 
- - VN, Vnamed := V.(*types.Named) - TN, Tnamed := T.(*types.Named) - if !Vnamed || !Tnamed { - return types.AssignableTo(V, T) - } - - vtparams := VN.TypeParams() - ttparams := TN.TypeParams() - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { - return types.AssignableTo(V, T) - } - - // V and T have the same (non-zero) number of type params. Instantiate both - // with the type parameters of V. This must always succeed for V, and will - // succeed for T if and only if the type set of each type parameter of V is a - // subset of the type set of the corresponding type parameter of T, meaning - // that every instantiation of V corresponds to a valid instantiation of T. - - // Minor optimization: ensure we share a context across the two - // instantiations below. - if ctxt == nil { - ctxt = types.NewContext() - } - - var targs []types.Type - for i := 0; i < vtparams.Len(); i++ { - targs = append(targs, vtparams.At(i)) - } - - vinst, err := types.Instantiate(ctxt, V, targs, true) - if err != nil { - panic("type parameters should satisfy their own constraints") - } - - tinst, err := types.Instantiate(ctxt, T, targs, true) - if err != nil { - return false - } - - return types.AssignableTo(vinst, tinst) -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go new file mode 100644 index 00000000000..b64f714eb30 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -0,0 +1,46 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/ast" + "go/types" + "strconv" +) + +// FileQualifier returns a [types.Qualifier] function that qualifies +// imported symbols appropriately based on the import environment of a given +// file. +// If the same package is imported multiple times, the last appearance is +// recorded. +func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier { + // Construct mapping of import paths to their defined names. + // It is only necessary to look at renaming imports. + imports := make(map[string]string) + for _, imp := range f.Imports { + if imp.Name != nil && imp.Name.Name != "_" { + path, _ := strconv.Unquote(imp.Path.Value) + imports[path] = imp.Name.Name + } + } + + // Define qualifier to replace full package paths with names of the imports. + return func(p *types.Package) string { + if p == nil || p == pkg { + return "" + } + + if name, ok := imports[p.Path()]; ok { + if name == "." { + return "" + } else { + return name + } + } + + // If there is no local renaming, fall back to the package name. + return p.Name() + } +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index ba6f4f4ebd5..e54accc69a0 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -11,6 +11,8 @@ import ( // ReceiverNamed returns the named type (if any) associated with the // type of recv, which may be of the form N or *N, or aliases thereof. // It also reports whether a Pointer was present. +// +// The named result may be nil in ill-typed code. 
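// A small usage sketch for the FileQualifier helper above (illustrative only;
// the file f, its package pkg, and the type t are hypothetical):
//
//	qual := FileQualifier(f, pkg)
//	src := types.TypeString(t, qual)
//	// Types from pkg print unqualified; a package imported as
//	// `renamed "example.com/foo"` prints as renamed.T; dot-imports print bare.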
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() if ptr, ok := types.Unalias(t).(*types.Pointer); ok { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index df3ea521254..a93d51f9882 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -82,6 +82,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier { type NamedOrAlias interface { types.Type Obj() *types.TypeName + // TODO(hxjiang): add method TypeArgs() *types.TypeList after we stop supporting go1.22. } // TypeParams is a light shim around t.TypeParams(). diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index 1066980649e..d272949c177 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -9,62 +9,97 @@ import ( "go/ast" "go/token" "go/types" - "strconv" "strings" ) -// ZeroString returns the string representation of the "zero" value of the type t. +// ZeroString returns the string representation of the zero value for any type t. +// The boolean result is false if the type is or contains an invalid type +// or a non-basic (constraint) interface type. +// +// Even for invalid input types, ZeroString may return a partially correct +// string representation. The caller should use the returned isValid boolean +// to determine the validity of the expression. +// +// When assigning to a wider type (such as 'any'), it's the caller's +// responsibility to handle any necessary type conversions. +// // This string can be used on the right-hand side of an assignment where the // left-hand side has that explicit type. +// References to named types are qualified by an appropriate (optional) +// qualifier function. // Exception: This does not apply to tuples. Their string representation is // informational only and cannot be used in an assignment. -// When assigning to a wider type (such as 'any'), it's the caller's -// responsibility to handle any necessary type conversions. +// // See [ZeroExpr] for a variant that returns an [ast.Expr].
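// A minimal sketch of the two-result form defined below (illustrative only;
// a nil qualifier is acceptable for these inputs):
//
//	s, ok := ZeroString(types.Typ[types.Int], nil)                  // "0", true
//	s, ok = ZeroString(types.NewSlice(types.Typ[types.Byte]), nil)  // "nil", true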
-func ZeroString(t types.Type, qf types.Qualifier) string { +func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) { switch t := t.(type) { case *types.Basic: switch { case t.Info()&types.IsBoolean != 0: - return "false" + return "false", true case t.Info()&types.IsNumeric != 0: - return "0" + return "0", true case t.Info()&types.IsString != 0: - return `""` + return `""`, true case t.Kind() == types.UnsafePointer: fallthrough case t.Kind() == types.UntypedNil: - return "nil" + return "nil", true + case t.Kind() == types.Invalid: + return "invalid", false default: - panic(fmt.Sprint("ZeroString for unexpected type:", t)) + panic(fmt.Sprintf("ZeroString for unexpected type %v", t)) } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return "nil" + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return "nil", true + + case *types.Interface: + if !t.IsMethodSet() { + return "invalid", false + } + return "nil", true - case *types.Named, *types.Alias: + case *types.Named: switch under := t.Underlying().(type) { case *types.Struct, *types.Array: - return types.TypeString(t, qf) + "{}" + return types.TypeString(t, qual) + "{}", true + default: + return ZeroString(under, qual) + } + + case *types.Alias: + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return types.TypeString(t, qual) + "{}", true default: - return ZeroString(under, qf) + // A type parameter can have an alias, but an alias type's underlying + // type can never be a type parameter. + // Use types.Unalias to preserve the type parameter info rather than + // calling Underlying(), which would pass right through to the type + // parameter's underlying type, which is always an interface. + return ZeroString(types.Unalias(t), qual) } case *types.Array, *types.Struct: - return types.TypeString(t, qf) + "{}" + return types.TypeString(t, qual) + "{}", true case *types.TypeParam: // Assumes func new is not shadowed. - return "*new(" + types.TypeString(t, qf) + ")" + return "*new(" + types.TypeString(t, qual) + ")", true case *types.Tuple: // Tuples are not normal values. // We currently format them as "(t[0], ..., t[n])"; this could change. + isValid := true components := make([]string, t.Len()) for i := 0; i < t.Len(); i++ { - components[i] = ZeroString(t.At(i).Type(), qf) + comp, ok := ZeroString(t.At(i).Type(), qual) + + components[i] = comp + isValid = isValid && ok } - return "(" + strings.Join(components, ", ") + ")" + return "(" + strings.Join(components, ", ") + ")", isValid case *types.Union: // Variables of these types cannot be created, so it makes @@ -76,45 +111,72 @@ } } -// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t. -// ZeroExpr is defined for types that are suitable for variables. -// It may panic for other types such as Tuple or Union. +// ZeroExpr returns the ast.Expr representation of the zero value for any type t. +// The boolean result is false if the type is or contains an invalid type +// or a non-basic (constraint) interface type. +// +// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr +// representation. The caller should use the returned isValid boolean to determine +// the validity of the expression.
+// +// This function is designed for types suitable for variables and should not be +// used with Tuple or Union types. References to named types are qualified by an +// appropriate (optional) qualifier function. +// +// See [ZeroString] for a variant that returns a string. -func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - switch t := typ.(type) { +func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) { + switch t := t.(type) { case *types.Basic: switch { case t.Info()&types.IsBoolean != 0: - return &ast.Ident{Name: "false"} + return &ast.Ident{Name: "false"}, true case t.Info()&types.IsNumeric != 0: - return &ast.BasicLit{Kind: token.INT, Value: "0"} + return &ast.BasicLit{Kind: token.INT, Value: "0"}, true case t.Info()&types.IsString != 0: - return &ast.BasicLit{Kind: token.STRING, Value: `""`} + return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true case t.Kind() == types.UnsafePointer: fallthrough case t.Kind() == types.UntypedNil: - return ast.NewIdent("nil") + return ast.NewIdent("nil"), true + case t.Kind() == types.Invalid: + return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false default: - panic(fmt.Sprint("ZeroExpr for unexpected type:", t)) + panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t)) } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return ast.NewIdent("nil") + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return ast.NewIdent("nil"), true + + case *types.Interface: + if !t.IsMethodSet() { + return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false + } + return ast.NewIdent("nil"), true - case *types.Named, *types.Alias: + case *types.Named: switch under := t.Underlying().(type) { case *types.Struct, *types.Array: return &ast.CompositeLit{ - Type: TypeExpr(f, pkg, typ), - } + Type: TypeExpr(t, qual), + }, true default: - return ZeroExpr(f, pkg, under) + return ZeroExpr(under, qual) + } + + case *types.Alias: + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return &ast.CompositeLit{ + Type: TypeExpr(t, qual), + }, true + default: + return ZeroExpr(types.Unalias(t), qual) } case *types.Array, *types.Struct: return &ast.CompositeLit{ - Type: TypeExpr(f, pkg, typ), - } + Type: TypeExpr(t, qual), + }, true case *types.TypeParam: return &ast.StarExpr{ // *new(T) @@ -125,7 +187,7 @@ ast.NewIdent(t.Obj().Name()), }, }, - } + }, true case *types.Tuple: // Unlike ZeroString, there is no ast.Expr can express tuple by @@ -157,16 +219,14 @@ } // TypeExpr returns syntax for the specified type. References to named types -// from packages other than pkg are qualified by an appropriate package name, as -// defined by the import environment of file. +// are qualified by an appropriate (optional) qualifier function. // It may panic for types such as Tuple or Union. -func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - switch t := typ.(type) { +func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { + switch t := t.(type) { case *types.Basic: switch t.Kind() { case types.UnsafePointer: - // TODO(hxjiang): replace the implementation with types.Qualifier.
- return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} + return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")} default: return ast.NewIdent(t.Name()) } @@ -174,7 +234,7 @@ case *types.Pointer: return &ast.UnaryExpr{ Op: token.MUL, - X: TypeExpr(f, pkg, t.Elem()), + X: TypeExpr(t.Elem(), qual), } case *types.Array: @@ -183,18 +243,18 @@ Kind: token.INT, Value: fmt.Sprintf("%d", t.Len()), }, - Elt: TypeExpr(f, pkg, t.Elem()), + Elt: TypeExpr(t.Elem(), qual), } case *types.Slice: return &ast.ArrayType{ - Elt: TypeExpr(f, pkg, t.Elem()), + Elt: TypeExpr(t.Elem(), qual), } case *types.Map: return &ast.MapType{ - Key: TypeExpr(f, pkg, t.Key()), - Value: TypeExpr(f, pkg, t.Elem()), + Key: TypeExpr(t.Key(), qual), + Value: TypeExpr(t.Elem(), qual), } case *types.Chan: @@ -204,14 +264,14 @@ } return &ast.ChanType{ Dir: dir, - Value: TypeExpr(f, pkg, t.Elem()), + Value: TypeExpr(t.Elem(), qual), } case *types.Signature: var params []*ast.Field for i := 0; i < t.Params().Len(); i++ { params = append(params, &ast.Field{ - Type: TypeExpr(f, pkg, t.Params().At(i).Type()), + Type: TypeExpr(t.Params().At(i).Type(), qual), Names: []*ast.Ident{ { Name: t.Params().At(i).Name(), @@ -226,7 +286,7 @@ var returns []*ast.Field for i := 0; i < t.Results().Len(); i++ { returns = append(returns, &ast.Field{ - Type: TypeExpr(f, pkg, t.Results().At(i).Type()), + Type: TypeExpr(t.Results().At(i).Type(), qual), }) } return &ast.FuncType{ @@ -238,23 +298,9 @@ }, } - case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} - switch t.Obj().Pkg() { - case pkg, nil: - return ast.NewIdent(t.Obj().Name()) - } - pkgName := t.Obj().Pkg().Name() - - // TODO(hxjiang): replace the implementation with types.Qualifier. - // If the file already imports the package under another name, use that. - for _, cand := range f.Imports { - if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { - if cand.Name != nil && cand.Name.Name != "" { - pkgName = cand.Name.Name - } - } - } - if pkgName == "." { + case *types.TypeParam: + pkgName := qual(t.Obj().Pkg()) + if pkgName == "" || t.Obj().Pkg() == nil { return ast.NewIdent(t.Obj().Name()) } return &ast.SelectorExpr{ @@ -262,6 +308,36 @@ Sel: ast.NewIdent(t.Obj().Name()), } + // types.TypeParam also implements interface NamedOrAlias. To differentiate, + // the TypeParam case needs to come before the NamedOrAlias case. + // TODO(hxjiang): remove this comment once TypeArgs() is added to interface + // NamedOrAlias. + case NamedOrAlias: + var expr ast.Expr = ast.NewIdent(t.Obj().Name()) + if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" { + expr = &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: expr.(*ast.Ident), + } + } + + // TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to + // typesinternal.NamedOrAlias.
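// For instance (illustrative only), for a hypothetical instantiated type
// pkg.Pair[int, string], the block below wraps the qualified identifier in an
// *ast.IndexListExpr so the printed syntax is `pkg.Pair[int, string]`.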
+ if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { + if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { + var indices []ast.Expr + for i := range typeArgs.Len() { + indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + } + expr = &ast.IndexListExpr{ + X: expr, + Indices: indices, + } + } + } + + return expr + case *types.Struct: return ast.NewIdent(t.String()) @@ -269,9 +345,43 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { return ast.NewIdent(t.String()) case *types.Union: - // TODO(hxjiang): handle the union through syntax (~A | ... | ~Z). - // Remove nil check when calling typesinternal.TypeExpr. - return nil + if t.Len() == 0 { + panic("Union type should have at least one term") + } + // Same as go/ast, the return expression will put last term in the + // Y field at topmost level of BinaryExpr. + // For union of type "float32 | float64 | int64", the structure looks + // similar to: + // { + // X: { + // X: float32, + // Op: | + // Y: float64, + // } + // Op: |, + // Y: int64, + // } + var union ast.Expr + for i := range t.Len() { + term := t.Term(i) + termExpr := TypeExpr(term.Type(), qual) + if term.Tilde() { + termExpr = &ast.UnaryExpr{ + Op: token.TILDE, + X: termExpr, + } + } + if i == 0 { + union = termExpr + } else { + union = &ast.BinaryExpr{ + X: union, + Op: token.OR, + Y: termExpr, + } + } + } + return union case *types.Tuple: panic("invalid input type types.Tuple") diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 6818b2de304..c3e8a4f5911 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -145,22 +145,40 @@ func CheckResponse(res *http.Response) error { } slurp, err := io.ReadAll(res.Body) if err == nil { - jerr := new(errorReply) - err = json.Unmarshal(slurp, jerr) - if err == nil && jerr.Error != nil { - if jerr.Error.Code == 0 { - jerr.Error.Code = res.StatusCode - } - jerr.Error.Body = string(slurp) - jerr.Error.Header = res.Header - return jerr.Error - } + return CheckResponseWithBody(res, slurp) } return &Error{ Code: res.StatusCode, Body: string(slurp), Header: res.Header, } + +} + +// CheckResponseWithBody returns an error (of type *Error) if the response +// status code is not 2xx. Distinct from CheckResponse to allow for checking +// a previously-read body to maintain error detail content. +func CheckResponseWithBody(res *http.Response, body []byte) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + + jerr := new(errorReply) + err := json.Unmarshal(body, jerr) + if err == nil && jerr.Error != nil { + if jerr.Error.Code == 0 { + jerr.Error.Code = res.StatusCode + } + jerr.Error.Body = string(body) + jerr.Error.Header = res.Header + return jerr.Error + } + + return &Error{ + Code: res.StatusCode, + Body: string(body), + Header: res.Header, + } } // IsNotModified reports whether err is the result of the diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 85ba75d08f5..559cab1385b 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC. +// Copyright 2025 Google LLC. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index a87fd3e727b..d74fe2a2998 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -164,6 +164,8 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // and calls the returned functions after the request returns (see send.go). // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +// Upload does not parse the response into the error on a non 200 response; +// it is the caller's responsibility to call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { // There are a couple of cases where it's possible for err and resp to both @@ -256,6 +258,18 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err rCtx, cancel = context.WithTimeout(ctx, rx.ChunkTransferTimeout) } + // We close the response's body here, since we definitely will not + // return `resp` now. If we close it before the select case above, a + // timer may fire and cause us to return a response with a closed body + // (in which case, the caller will not get the error message in the body). + if resp != nil && resp.Body != nil { + // Read the body to EOF - if the Body is not both read to EOF and closed, + // the Client's underlying RoundTripper may not be able to re-use the + // persistent TCP connection to the server for a subsequent "keep-alive" request. + // See https://pkg.go.dev/net/http#Client.Do + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } resp, err = rx.transferChunk(rCtx) var status int @@ -282,15 +296,11 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err rx.attempts++ pause = bo.Pause() - if resp != nil && resp.Body != nil { - resp.Body.Close() - } } // If the chunk was uploaded successfully, but there's still // more to go, upload the next chunk without any delay. if statusResumeIncomplete(resp) { - resp.Body.Close() continue } diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 532de375dd4..e55b0e5b710 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.213.0" +const Version = "0.218.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 474fbb49846..89f08a8d98b 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC. +// Copyright 2025 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
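Aside on the resumable-upload hunk above: before retrying a chunk, the previous attempt's response body is now read to EOF and then closed, so the HTTP client's underlying RoundTripper can reuse the persistent keep-alive TCP connection (see https://pkg.go.dev/net/http#Client.Do). A minimal standalone sketch of that drain-and-close pattern follows; the helper name drainAndClose is illustrative and not part of this patch:

package main

import (
	"io"
	"net/http"
)

// drainAndClose reads resp.Body to EOF and then closes it, so the
// client's underlying RoundTripper can reuse the keep-alive TCP
// connection for a subsequent request instead of tearing it down.
func drainAndClose(resp *http.Response) {
	if resp == nil || resp.Body == nil {
		return
	}
	io.Copy(io.Discard, resp.Body) // best-effort drain; error intentionally ignored
	resp.Body.Close()
}

In the vendored code this runs only once the retry loop has committed to replacing resp, which is why the close moved below the timer select: closing earlier could hand the caller a response whose body (and error detail) was already gone.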
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3e562182792..3cd9a5bb8e6 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -80,11 +80,12 @@ type ErrorInfo struct { Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + // ideally be lowerCamelCase. Also, they must be limited to 64 characters in // length. When identifying the current value of an exceeded limit, the units // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // `{"instanceLimit": "100/request"}`, should be returned as, + // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of // instances that can be created in a single (batch) request. Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -870,6 +871,16 @@ type BadRequest_FieldViolation struct { Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The reason of the field-level error. This is a constant value that + // identifies the proximate cause of the field-level error. It should + // uniquely identify the type of the FieldViolation within the scope of the + // google.rpc.ErrorInfo.domain. This should be at most 63 + // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, + // which represents UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Provides a localized error message for field-level errors that is safe to + // return to the API consumer. + LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"` } func (x *BadRequest_FieldViolation) Reset() { @@ -918,6 +929,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string { return "" } +func (x *BadRequest_FieldViolation) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage { + if x != nil { + return x.LocalizedMessage + } + return nil +} + // Describes a URL link. 
type Help_Link struct { state protoimpl.MessageState @@ -1026,51 +1051,57 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, + 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 
0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, + 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, + 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, + 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1111,11 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 8f9e592f870..cffdfda9619 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -192,7 +192,7 @@ func (d decoder) unmarshalMessage(m 
protoreflect.Message, skipTypeURL bool) erro fd = fieldDescs.ByTextName(name) } } - if flags.ProtoLegacy { + if flags.ProtoLegacyWeak { if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { fd = nil // reset since the weak reference is not linked in } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac422..d972a3d98ed 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,7 +185,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { + if flags.ProtoLegacyWeak { if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { fd = nil // reset since the weak reference is not linked in } diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd3485..5cb3ee70f91 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -22,3 +22,8 @@ const ProtoLegacy = protoLegacy // extension fields at unmarshal time, but defers creating the message // structure until the extension is first accessed. const LazyUnmarshalExtensions = ProtoLegacy + +// ProtoLegacyWeak specifies whether to enable support for weak fields. +// This flag was split out of ProtoLegacy in preparation for removing +// support for weak fields (independent of the other protolegacy features). +const ProtoLegacyWeak = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0bae9c..229c6980138 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { @@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -371,7 +371,7 @@ func 
mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 4b15493f2f4..00000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66eaf8..00000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 2f7b363ec4a..111d95833d4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -118,12 +118,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) case fd.IsWeak(): fieldOffset = si.weakOffset funcs = makeWeakMessageFieldCoder(fd) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 88c16ae5b7c..f81d7d0db9a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -45,19 +45,19 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf var childMessage *MessageInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) case fd.IsWeak(): fieldOffset = si.weakOffset funcs = makeWeakMessageFieldCoder(fd) case fd.Message() != nil && !fd.IsMap(): - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) if fd.IsList() { childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) } else { childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) } default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &coderFieldInfo{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a651d..e4580b3ac2e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { return v } func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { - iter := mapRange(ms.v) + iter := ms.v.MapRange() for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() v := ms.valConv.PBValueOf(iter.Value()) diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index fa10a0f5cc9..d1f79b4224f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -165,28 +165,28 @@ fieldLoop: switch f := t.Field(i); f.Name { case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) + si.weakOffset = offsetOf(f) si.weakType = f.Type } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType 
{ - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } case "lazyFields", "XXX_lazyUnmarshalInfo": - si.lazyOffset = offsetOf(f, mi.Exporter) + si.lazyOffset = offsetOf(f) case "XXX_presence": - si.presenceOffset = offsetOf(f, mi.Exporter) + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index d407dd791e8..d8dcd788636 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -88,9 +88,7 @@ func opaqueInitHook(mi *MessageInfo) bool { mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < mi.Desc.Oneofs().Len(); i++ { od := mi.Desc.Oneofs().Get(i) - if !od.IsSynthetic() { - mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter) - } + mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter) } mi.denseFields = make([]*fieldInfo, fds.Len()*2) @@ -119,12 +117,32 @@ func opaqueInitHook(mi *MessageInfo) bool { return true } +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fd := od.Fields().Get(0) + index, _ := presenceIndex(mi.Desc, fd) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + if !mi.present(p, index) { + return 0 + } + return od.Fields().Get(0).Number() + } + return oi + } + // Dispatch to non-opaque oneof implementation for non-synthetic oneofs. 
+ return makeOneofInfo(od, si, x) +} + func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) } - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) conv := NewConverter(ft, fd) return fieldInfo{ fieldDesc: fd, @@ -178,7 +196,7 @@ func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd prot panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(reflect.PtrTo(ft), fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) return fieldInfo{ fieldDesc: fd, @@ -228,7 +246,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd pro panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) fieldNumber := fd.Number() return fieldInfo{ @@ -321,7 +339,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -393,7 +411,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref deref = true } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) var getter func(p pointer) protoreflect.Value if !nullable { @@ -462,7 +480,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) fieldNumber := fd.Number() elemType := fs.Type.Elem() diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index a740646205c..3cd1fbc21fb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -76,7 +76,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, isMessage := fd.Message() != nil // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ // NOTE: The logic below intentionally assumes that oneof fields are // well-formatted. That is, the oneof interface never contains a @@ -152,7 +152,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -205,7 +205,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(reflect.PtrTo(ft), fd) // TODO: Implement unsafe fast path? 
- fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -269,7 +269,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) // Generate specialized getter functions to avoid going through reflect.Value if nullable { @@ -333,7 +333,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { + if !flags.ProtoLegacyWeak { panic("no support for proto1 weak fields") } @@ -410,7 +410,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -419,7 +419,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) + return !rv.IsZero() } return !rv.IsNil() }, @@ -466,7 +466,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -479,7 +479,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } } else { fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -497,41 +497,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } return oi } - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. 
-func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 041ebde2de6..6bed45e35c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -22,7 +22,7 @@ type Pointer unsafe.Pointer type offset uintptr // offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { +func offsetOf(f reflect.StructField) offset { return offset(f.Offset) } diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 3018450df79..4a39af0c635 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 1 + Patch = 4 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index a3b5e142d24..e28d7acb378 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -172,7 +172,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacy { + } else if flags.ProtoLegacyWeak { if fd.IsWeak() && fd.Message().IsPlaceholder() { err = errUnknown // weak referent is not linked in } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2ebdb..5eaf652176c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,7 +149,7 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { + if f.IsWeak() && !flags.ProtoLegacyWeak { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { diff --git 
a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index bf0a0ccdeed..697a61b290e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" @@ -125,16 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } - if sep := goFeatures.StripEnumPrefix; sep != nil { - parentFS.StripEnumPrefix = int(*sep) - } - if al := goFeatures.ApiLevel; al != nil { - parentFS.APILevel = int(*al) - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + // + // Further, we harden this code against adversarial inputs: a + // service which accepts descriptors from a possibly malicious + // source shouldn't crash. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + gf, ok := goFeatures.Interface().(protoreflect.Message) + if !ok { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. + fields := gf.Descriptor().Fields() + + if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.BoolKind && + gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a551e7ae945..a5163376741 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -46,6 +46,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // The full set of known editions. 
@@ -4360,7 +4361,7 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ +var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -5130,16 +5131,16 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -} +}) var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc + file_google_protobuf_descriptor_proto_rawDescData []byte ) func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) }) return file_google_protobuf_descriptor_proto_rawDescData } @@ -5292,7 +5293,7 @@ func file_google_protobuf_descriptor_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), NumEnums: 17, NumMessages: 33, NumExtensions: 0, @@ -5304,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() { MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, }.Build() File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil file_google_protobuf_descriptor_proto_goTypes = nil file_google_protobuf_descriptor_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index e0b72eaf922..28d24bad79e 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -16,6 +16,7 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) type GoFeatures_APILevel int32 @@ -227,7 +228,7 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = []byte{ +var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, @@ -283,16 +284,16 @@ var 
file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -} +}) var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescData []byte ) func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc))) }) return file_google_protobuf_go_features_proto_rawDescData } @@ -326,7 +327,7 @@ func file_google_protobuf_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), NumEnums: 2, NumMessages: 1, NumExtensions: 1, @@ -339,7 +340,6 @@ func file_google_protobuf_go_features_proto_init() { ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_rawDesc = nil file_google_protobuf_go_features_proto_goTypes = nil file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 191552cce09..497da66e91f 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -122,6 +122,7 @@ import ( reflect "reflect" strings "strings" sync "sync" + unsafe "unsafe" ) // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -411,7 +412,7 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = []byte{ +var file_google_protobuf_any_proto_rawDesc = string([]byte{ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, @@ -427,16 +428,16 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc + file_google_protobuf_any_proto_rawDescData []byte ) func file_google_protobuf_any_proto_rawDescGZIP() []byte { file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) }) return file_google_protobuf_any_proto_rawDescData } @@ -462,7 +463,7 @@ func file_google_protobuf_any_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -473,7 +474,6 @@ func file_google_protobuf_any_proto_init() { MessageInfos: file_google_protobuf_any_proto_msgTypes, }.Build() File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil file_google_protobuf_any_proto_goTypes = nil file_google_protobuf_any_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 34d76e6cd93..193880d1813 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -80,6 +80,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Duration represents a signed, fixed-length span of time represented @@ -288,7 +289,7 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = []byte{ +var file_google_protobuf_duration_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -305,16 +306,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc + file_google_protobuf_duration_proto_rawDescData []byte ) func file_google_protobuf_duration_proto_rawDescGZIP() []byte { file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc))) }) return file_google_protobuf_duration_proto_rawDescData } @@ -340,7 +341,7 @@ func file_google_protobuf_duration_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)), 
NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -351,7 +352,6 @@ func file_google_protobuf_duration_proto_init() { MessageInfos: file_google_protobuf_duration_proto_msgTypes, }.Build() File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_rawDesc = nil file_google_protobuf_duration_proto_goTypes = nil file_google_protobuf_duration_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 7fcdd382cc9..a5b8657c4ba 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -38,6 +38,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // A generic empty message that you can re-use to avoid defining duplicated @@ -85,7 +86,7 @@ func (*Empty) Descriptor() ([]byte, []int) { var File_google_protobuf_empty_proto protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = []byte{ +var file_google_protobuf_empty_proto_rawDesc = string([]byte{ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, @@ -98,16 +99,16 @@ var file_google_protobuf_empty_proto_rawDesc = []byte{ 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once - file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc + file_google_protobuf_empty_proto_rawDescData []byte ) func file_google_protobuf_empty_proto_rawDescGZIP() []byte { file_google_protobuf_empty_proto_rawDescOnce.Do(func() { - file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc))) }) return file_google_protobuf_empty_proto_rawDescData } @@ -133,7 +134,7 @@ func file_google_protobuf_empty_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -144,7 +145,6 @@ func file_google_protobuf_empty_proto_init() { MessageInfos: file_google_protobuf_empty_proto_msgTypes, }.Build() File_google_protobuf_empty_proto = out.File - file_google_protobuf_empty_proto_rawDesc = nil file_google_protobuf_empty_proto_goTypes = nil file_google_protobuf_empty_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e5d7da38c20..041feb0f3ec 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ 
b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -83,6 +83,7 @@ import ( sort "sort" strings "strings" sync "sync" + unsafe "unsafe" ) // `FieldMask` represents a set of symbolic field paths, for example: @@ -503,7 +504,7 @@ func (x *FieldMask) GetPaths() []string { var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor -var file_google_protobuf_field_mask_proto_rawDesc = []byte{ +var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -519,16 +520,16 @@ var file_google_protobuf_field_mask_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_field_mask_proto_rawDescOnce sync.Once - file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc + file_google_protobuf_field_mask_proto_rawDescData []byte ) func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { - file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc))) }) return file_google_protobuf_field_mask_proto_rawDescData } @@ -554,7 +555,7 @@ func file_google_protobuf_field_mask_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -565,7 +566,6 @@ func file_google_protobuf_field_mask_proto_init() { MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, }.Build() File_google_protobuf_field_mask_proto = out.File - file_google_protobuf_field_mask_proto_rawDesc = nil file_google_protobuf_field_mask_proto_goTypes = nil file_google_protobuf_field_mask_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index f2c53ea337c..ecdd31ab538 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -128,6 +128,7 @@ import ( reflect "reflect" sync "sync" utf8 "unicode/utf8" + unsafe "unsafe" ) // `NullValue` is a singleton enumeration to represent the null value for the @@ -671,7 +672,7 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var file_google_protobuf_struct_proto_rawDesc = []byte{ +var file_google_protobuf_struct_proto_rawDesc = string([]byte{ 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 
0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, @@ -719,16 +720,16 @@ var file_google_protobuf_struct_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once - file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc + file_google_protobuf_struct_proto_rawDescData []byte ) func file_google_protobuf_struct_proto_rawDescGZIP() []byte { file_google_protobuf_struct_proto_rawDescOnce.Do(func() { - file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc))) }) return file_google_protobuf_struct_proto_rawDescData } @@ -773,7 +774,7 @@ func file_google_protobuf_struct_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)), NumEnums: 1, NumMessages: 4, NumExtensions: 0, @@ -785,7 +786,6 @@ func file_google_protobuf_struct_proto_init() { MessageInfos: file_google_protobuf_struct_proto_msgTypes, }.Build() File_google_protobuf_struct_proto = out.File - file_google_protobuf_struct_proto_rawDesc = nil file_google_protobuf_struct_proto_goTypes = nil file_google_protobuf_struct_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 9550109aa3b..00ac835c0bb 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -78,6 +78,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Timestamp represents a point in time independent of any time zone or local @@ -297,7 +298,7 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = []byte{ +var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, @@ -314,16 +315,16 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc + 
file_google_protobuf_timestamp_proto_rawDescData []byte ) func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc))) }) return file_google_protobuf_timestamp_proto_rawDescData } @@ -349,7 +350,7 @@ func file_google_protobuf_timestamp_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -360,7 +361,6 @@ func file_google_protobuf_timestamp_proto_init() { MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, }.Build() File_google_protobuf_timestamp_proto = out.File - file_google_protobuf_timestamp_proto_rawDesc = nil file_google_protobuf_timestamp_proto_goTypes = nil file_google_protobuf_timestamp_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 15b424ec12c..5de5301063b 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -48,6 +48,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // Wrapper message for `double`. 
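A pattern worth calling out, since it repeats across every regenerated `*.pb.go` file in this patch: protobuf-go v1.36.x emits the raw file descriptor as a string (`string([]byte{...})`) instead of a mutable `[]byte` variable, and converts it back to a byte slice only at the point of use via `unsafe.Slice(unsafe.StringData(...), len(...))`. The conversion is zero-copy: the slice aliases the string's backing memory, which is safe here because the descriptor bytes are only ever read. A minimal standalone sketch of the same conversion (illustrative names, not taken from the vendored code):

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesOf returns a read-only []byte view of s without copying.
// The slice shares s's backing array, so it must never be mutated;
// writing through it would break Go's string-immutability guarantee.
func bytesOf(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	// Stand-in for a generated rawDesc: a string built from descriptor bytes.
	rawDesc := string([]byte{0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65})
	b := bytesOf(rawDesc)
	fmt.Printf("% x\n", b) // 0a 1e 67 6f 6f 67 6c 65
}
```

This also explains why the hunks drop the old `file_..._rawDesc = nil` reset in each `init` function: the descriptor now lives in a string rather than a releasable byte slice, so there is nothing to nil out after registration.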
@@ -529,7 +530,7 @@ func (x *BytesValue) GetValue() []byte { var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor -var file_google_protobuf_wrappers_proto_rawDesc = []byte{ +var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -563,16 +564,16 @@ var file_google_protobuf_wrappers_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_wrappers_proto_rawDescOnce sync.Once - file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc + file_google_protobuf_wrappers_proto_rawDescData []byte ) func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { - file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc))) }) return file_google_protobuf_wrappers_proto_rawDescData } @@ -606,7 +607,7 @@ func file_google_protobuf_wrappers_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)), NumEnums: 0, NumMessages: 9, NumExtensions: 0, @@ -617,7 +618,6 @@ func file_google_protobuf_wrappers_proto_init() { MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, }.Build() File_google_protobuf_wrappers_proto = out.File - file_google_protobuf_wrappers_proto_rawDesc = nil file_google_protobuf_wrappers_proto_goTypes = nil file_google_protobuf_wrappers_proto_depIdxs = nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 0edeea73918..7c7fe84e345 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,8 +4,8 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.13.0 -## explicit; go 1.21 +# cloud.google.com/go/auth v0.14.0 +## explicit; go 1.22 cloud.google.com/go/auth cloud.google.com/go/auth/credentials cloud.google.com/go/auth/credentials/internal/externalaccount @@ -21,8 +21,8 @@ cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -# cloud.google.com/go/auth/oauth2adapt v0.2.6 -## explicit; go 1.21 +# cloud.google.com/go/auth/oauth2adapt v0.2.7 +## explicit; go 1.22 cloud.google.com/go/auth/oauth2adapt # cloud.google.com/go/compute/metadata v0.6.0 ## explicit; go 1.21 @@ -37,7 +37,7 @@ cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb -# 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource @@ -59,7 +59,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal @@ -87,7 +87,7 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service -# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -143,7 +143,7 @@ github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.55.5 +# github.com/aws/aws-sdk-go v1.55.6 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/auth/bearer @@ -276,8 +276,6 @@ github.com/aws/smithy-go/transport/http/internal/io # github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 ## explicit; go 1.20 github.com/bboreham/go-loser -# github.com/benbjohnson/clock v1.3.5 -## explicit; go 1.15 # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -344,6 +342,8 @@ github.com/efficientgo/core/testutil/internal # github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd ## explicit; go 1.15 github.com/efficientgo/tools/extkingpin +# github.com/envoyproxy/go-control-plane v0.12.0 +## explicit; go 1.17 # github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb ## explicit github.com/facette/natsort @@ -502,7 +502,7 @@ github.com/google/go-cmp/cmp/internal/value # github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad ## explicit; go 1.22 github.com/google/pprof/profile -# github.com/google/s2a-go v0.1.8 +# github.com/google/s2a-go v0.1.9 ## explicit; go 1.20 github.com/google/s2a-go github.com/google/s2a-go/fallback @@ -532,7 +532,7 @@ github.com/google/uuid ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.14.0 +# github.com/googleapis/gax-go/v2 v2.14.1 ## explicit; go 1.21 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror @@ -556,8 +556,8 @@ github.com/grpc-ecosystem/go-grpc-middleware github.com/grpc-ecosystem/go-grpc-middleware/v2 github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 -## explicit; go 1.22.7 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 +## explicit; go 1.22.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime 
github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -680,7 +680,7 @@ github.com/mdlayher/vsock # github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a ## explicit; go 1.14 github.com/metalmatze/signal/server/signalhttp -# github.com/miekg/dns v1.1.62 +# github.com/miekg/dns v1.1.63 ## explicit; go 1.19 github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 @@ -733,6 +733,24 @@ github.com/oklog/run # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 +## explicit; go 1.22.0 +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 +## explicit; go 1.22.0 +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 +## explicit; go 1.22.0 +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry # github.com/opentracing-contrib/go-grpc v0.1.0 ## explicit; go 1.22.7 github.com/opentracing-contrib/go-grpc @@ -814,8 +832,8 @@ github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/timeinterval github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.20.5 -## explicit; go 1.20 +# github.com/prometheus/client_golang v1.21.0-rc.0 +## explicit; go 1.21 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil @@ -833,7 +851,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.61.0 +# github.com/prometheus/common v0.62.0 ## explicit; go 1.21 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -853,7 +871,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.301.0 +# github.com/prometheus/prometheus v0.302.1 ## explicit; go 1.22.7 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -913,7 +931,7 @@ github.com/prometheus/prometheus/util/strutil github.com/prometheus/prometheus/util/testutil 
github.com/prometheus/prometheus/util/zeropool github.com/prometheus/prometheus/web/api/v1 -# github.com/prometheus/sigv4 v0.1.0 +# github.com/prometheus/sigv4 v0.1.1 ## explicit; go 1.21 github.com/prometheus/sigv4 # github.com/redis/rueidis v1.0.45-alpha.1 @@ -1181,7 +1199,17 @@ go.opencensus.io/trace/tracestate ## explicit; go 1.22.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/collector/pdata v1.22.0 +# go.opentelemetry.io/collector/component v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/component +# go.opentelemetry.io/collector/config/configtelemetry v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/config/configtelemetry +# go.opentelemetry.io/collector/consumer v1.24.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/consumer +go.opentelemetry.io/collector/consumer/internal +# go.opentelemetry.io/collector/pdata v1.24.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data @@ -1198,20 +1226,29 @@ go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1 go.opentelemetry.io/collector/pdata/internal/json go.opentelemetry.io/collector/pdata/internal/otlp go.opentelemetry.io/collector/pdata/pcommon +go.opentelemetry.io/collector/pdata/plog go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp -# go.opentelemetry.io/collector/semconv v0.116.0 +go.opentelemetry.io/collector/pdata/ptrace +# go.opentelemetry.io/collector/pipeline v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/pipeline +go.opentelemetry.io/collector/pipeline/internal/globalsignal +# go.opentelemetry.io/collector/processor v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/processor +# go.opentelemetry.io/collector/semconv v0.118.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/semconv/v1.6.1 # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0 ## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 ## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request @@ -1232,7 +1269,7 @@ go.opentelemetry.io/contrib/propagators/jaeger # go.opentelemetry.io/contrib/propagators/ot v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/ot -# go.opentelemetry.io/otel v1.33.0 +# go.opentelemetry.io/otel v1.34.0 ## explicit; go 1.22.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1253,23 +1290,23 @@ go.opentelemetry.io/otel/semconv/v1.26.0 ## explicit; go 1.22.0 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 -## explicit; go 1.22.7 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 -## explicit; go 1.22.7 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.33.0 +# go.opentelemetry.io/otel/metric v1.34.0 ## explicit; go 1.22.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.33.0 +# go.opentelemetry.io/otel/sdk v1.34.0 ## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -1278,13 +1315,13 @@ go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.33.0 +# go.opentelemetry.io/otel/trace v1.34.0 ## explicit; go 1.22.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.4.0 -## explicit; go 1.22.7 +# go.opentelemetry.io/proto/otlp v1.5.0 +## explicit; go 1.22.0 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 @@ -1299,13 +1336,16 @@ go.uber.org/goleak/internal/stack # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.21.0 -## explicit; go 1.13 +# go.uber.org/zap v1.27.0 +## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer +go.uber.org/zap/internal go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit +go.uber.org/zap/internal/pool +go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc # go4.org/intern v0.0.0-20230525184215-6c62f75575cb @@ -1314,7 +1354,7 @@ go4.org/intern # go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 ## explicit; go 1.11 go4.org/unsafe/assume-no-moving-gc -# golang.org/x/crypto v0.31.0 +# golang.org/x/crypto v0.32.0 ## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1336,7 +1376,7 @@ golang.org/x/exp/slices # golang.org/x/mod v0.22.0 ## explicit; go 1.22.0 golang.org/x/mod/semver -# golang.org/x/net v0.33.0 +# golang.org/x/net v0.34.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -1354,7 +1394,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.24.0 +# golang.org/x/oauth2 v0.25.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1372,7 +1412,7 @@ golang.org/x/oauth2/jwt golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.28.0 +# golang.org/x/sys v0.29.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix @@ -1391,10 +1431,10 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm 
-# golang.org/x/time v0.8.0 +# golang.org/x/time v0.9.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.28.0 +# golang.org/x/tools v0.29.0 ## explicit; go 1.22.0 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/packages @@ -1418,8 +1458,8 @@ golang.org/x/tools/internal/versions gonum.org/v1/gonum/floats gonum.org/v1/gonum/floats/scalar gonum.org/v1/gonum/internal/asm/f64 -# google.golang.org/api v0.213.0 -## explicit; go 1.21 +# google.golang.org/api v0.218.0 +## explicit; go 1.22 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/iamcredentials/v1 @@ -1439,17 +1479,17 @@ google.golang.org/api/transport/http ## explicit; go 1.21 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 -## explicit; go 1.21 +# google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f +## explicit; go 1.22 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 -## explicit; go 1.21 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f +## explicit; go 1.22 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.69.0 => google.golang.org/grpc v1.65.0 +# google.golang.org/grpc v1.70.0 => google.golang.org/grpc v1.65.0 ## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1519,7 +1559,7 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.36.1 +# google.golang.org/protobuf v1.36.4 ## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson From 9694410d5d24eb998f78826e181b99bae8e8b626 Mon Sep 17 00:00:00 2001 From: Anand Rajagopal Date: Wed, 5 Mar 2025 18:27:20 +0000 Subject: [PATCH 2/3] Upgrade client-golang and introduce flag for restoring 'for' state Signed-off-by: Anand Rajagopal --- docs/configuration/config-file-reference.md | 4 ++ go.mod | 2 +- go.sum | 2 + pkg/ruler/compat.go | 2 +- pkg/ruler/ruler.go | 7 +-- .../client_golang/prometheus/atomic_update.go | 50 ------------------- .../client_golang/prometheus/counter.go | 10 ++-- .../client_golang/prometheus/gauge.go | 10 ++-- .../client_golang/prometheus/histogram.go | 10 ++-- .../prometheus/process_collector_darwin.go | 2 + ...n.c => process_collector_mem_cgo_darwin.c} | 2 +- ...go => process_collector_mem_cgo_darwin.go} | 2 +- ... 
=> process_collector_mem_nocgo_darwin.go} | 2 +- ....go => process_collector_not_supported.go} | 4 +- ....go => process_collector_procfsenabled.go} | 0 .../client_golang/prometheus/summary.go | 25 ++++++---- vendor/modules.txt | 2 +- 17 files changed, 56 insertions(+), 80 deletions(-) delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go rename vendor/github.com/prometheus/client_golang/prometheus/{process_collector_cgo_darwin.c => process_collector_mem_cgo_darwin.c} (98%) rename vendor/github.com/prometheus/client_golang/prometheus/{process_collector_cgo_darwin.go => process_collector_mem_cgo_darwin.go} (97%) rename vendor/github.com/prometheus/client_golang/prometheus/{process_collector_nocgo_darwin.go => process_collector_mem_nocgo_darwin.go} (97%) rename vendor/github.com/prometheus/client_golang/prometheus/{process_collector_wasip1_js.go => process_collector_not_supported.go} (95%) rename vendor/github.com/prometheus/client_golang/prometheus/{process_collector_other.go => process_collector_procfsenabled.go} (100%) diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index f517ede562c..4c7a599b96e 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -4855,6 +4855,10 @@ ring: # ruler.enable-ha-evaluation is true. # CLI flag: -ruler.liveness-check-timeout [liveness_check_timeout: <duration> | default = 1s] + +# If true, restores the `for` state of new rule groups added to an existing rule manager. +# CLI flag: -ruler.always-restore-new-rule-groups +[always_restore_new_rule_groups: <boolean> | default = false] ``` ### `ruler_storage_config` diff --git a/go.mod b/go.mod index a3cfad1ee12..16036b725ca 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.28.0 - github.com/prometheus/client_golang v1.21.0-rc.0 + github.com/prometheus/client_golang v1.21.1 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.62.0 // Prometheus maps version 2.x.y to tags v0.x.y.
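The vendored client_golang hunks below (counter.go, gauge.go, histogram.go, summary.go) revert the `atomicUpdateFloat` helper, deleting atomic_update.go and inlining plain compare-and-swap loops again. The pattern is the standard lock-free way to add to a float64 stored as its IEEE-754 bit pattern in a uint64; here is a self-contained sketch with illustrative names:

```go
package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// addFloat64 atomically adds delta to the float64 whose IEEE-754 bit
// pattern is stored in *bits. On contention the CompareAndSwap fails
// and the loop retries with a freshly loaded value.
func addFloat64(bits *uint64, delta float64) {
	for {
		oldBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + delta)
		if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
			return
		}
	}
}

func main() {
	var sumBits uint64 // bit pattern of 0.0
	var wg sync.WaitGroup
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			addFloat64(&sumBits, 0.5)
		}()
	}
	wg.Wait()
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&sumBits))) // 500
}
```

The deleted helper had wrapped this same loop with exponential backoff (10ms doubling to a 320ms cap, per its comment citing client_golang PR 1661); v1.21.1 returns to the bare loop.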
diff --git a/go.sum b/go.sum index 8fd28a4599e..1c884e1c134 100644 --- a/go.sum +++ b/go.sum @@ -1573,6 +1573,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.21.0-rc.0 h1:bR+RxBlwcr4q8hXkgSOA/J18j6n0/qH0Gb0DH+8c+RY= github.com/prometheus/client_golang v1.21.0-rc.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= +github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 37391e6926d..c154f417772 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -370,7 +370,7 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi DefaultRuleQueryOffset: func() time.Duration { return overrides.RulerQueryOffset(userID) }, - RestoreNewRuleGroups: true, + RestoreNewRuleGroups: cfg.AlwaysRestoreNewRuleGroups, }), nil } } diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 7cce799b896..bdc997d5525 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -170,8 +170,9 @@ type Config struct { EnableQueryStats bool `yaml:"query_stats_enabled"` DisableRuleGroupLabel bool `yaml:"disable_rule_group_label"` - EnableHAEvaluation bool `yaml:"enable_ha_evaluation"` - LivenessCheckTimeout time.Duration `yaml:"liveness_check_timeout"` + EnableHAEvaluation bool `yaml:"enable_ha_evaluation"` + LivenessCheckTimeout time.Duration `yaml:"liveness_check_timeout"` + AlwaysRestoreNewRuleGroups bool `yaml:"always_restore_new_rule_groups"` } // Validate config and returns error on failure @@ -254,7 +255,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.EnableHAEvaluation, "ruler.enable-ha-evaluation", false, "Enable high availability") f.DurationVar(&cfg.LivenessCheckTimeout, "ruler.liveness-check-timeout", 1*time.Second, "Timeout duration for non-primary rulers during liveness checks. If the check times out, the non-primary ruler will evaluate the rule group. Applicable when ruler.enable-ha-evaluation is true.") - + f.BoolVar(&cfg.AlwaysRestoreNewRuleGroups, "ruler.always-restore-new-rule-groups", false, "When enabled, ruler will always restore the `for` state of new rule groups") cfg.RingCheckPeriod = 5 * time.Second } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go b/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go deleted file mode 100644 index b65896a3195..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "sync/atomic" - "time" -) - -// atomicUpdateFloat atomically updates the float64 value pointed to by bits -// using the provided updateFunc, with an exponential backoff on contention. -func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) { - const ( - // both numbers are derived from empirical observations - // documented in this PR: https://github.com/prometheus/client_golang/pull/1661 - maxBackoff = 320 * time.Millisecond - initialBackoff = 10 * time.Millisecond - ) - backoff := initialBackoff - - for { - loadedBits := atomic.LoadUint64(bits) - oldFloat := math.Float64frombits(loadedBits) - newFloat := updateFunc(oldFloat) - newBits := math.Float64bits(newFloat) - - if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { - break - } else { - // Exponential backoff with sleep and cap to avoid infinite wait - time.Sleep(backoff) - backoff *= 2 - if backoff > maxBackoff { - backoff = maxBackoff - } - } - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 2996aef6a0a..4ce84e7a80e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -134,9 +134,13 @@ func (c *counter) Add(v float64) { return } - atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 { - return oldVal + v - }) + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } } func (c *counter) AddWithExemplar(v float64, e Labels) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index aa18463654f..dd2eac94067 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -120,9 +120,13 @@ func (g *gauge) Dec() { } func (g *gauge) Add(val float64) { - atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 { - return oldVal + val - }) + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } } func (g *gauge) Sub(val float64) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 1a279035b30..c453b754a7f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -1647,9 +1647,13 @@ func waitForCooldown(count uint64, counts *histogramCounts) { // atomicAddFloat adds the provided float atomically to another float // represented by the bit pattern the bits pointer is pointing to. 
func atomicAddFloat(bits *uint64, v float64) { - atomicUpdateFloat(bits, func(oldVal float64) float64 { - return oldVal + v - }) + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } } // atomicDecUint32 atomically decrements the uint32 p points to. See diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go index 50eb860a682..0a61b984613 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build darwin && !ios + package prometheus import ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c similarity index 98% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c index 1554f674d85..d00a24315de 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build darwin && cgo +//go:build darwin && !ios && cgo #include #include diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go similarity index 97% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go index b375c3a7719..9ac53f99925 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build darwin && cgo +//go:build darwin && !ios && cgo package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go similarity index 97% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go index 5165047311e..8ddb0995d6a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build darwin && !cgo +//go:build darwin && !ios && !cgo package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go similarity index 95% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go index c68f7f85187..7732b7f3764 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build wasip1 || js -// +build wasip1 js +//go:build wasip1 || js || ios +// +build wasip1 js ios package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go similarity index 100% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 76a9e12f4a4..ac5203c6faa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -471,9 +471,13 @@ func (s *noObjectivesSummary) Observe(v float64) { n := atomic.AddUint64(&s.countAndHotIdx, 1) hotCounts := s.counts[n>>63] - atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 { - return oldVal + v - }) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } // Increment count last as we take it as a signal that the observation // is complete. atomic.AddUint64(&hotCounts.count, 1) @@ -515,13 +519,14 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error { // Finally add all the cold counts to the new hot counts and reset the cold counts. atomic.AddUint64(&hotCounts.count, count) atomic.StoreUint64(&coldCounts.count, 0) - - // Use atomicUpdateFloat to update hotCounts.sumBits atomically. 
- atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 { - return oldVal + sum.GetSampleSum() - }) - atomic.StoreUint64(&coldCounts.sumBits, 0) - + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } return nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 7c7fe84e345..125fa187d08 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -832,7 +832,7 @@ github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/timeinterval github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.21.0-rc.0 +# github.com/prometheus/client_golang v1.21.1 ## explicit; go 1.21 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 From d01ad5f2a533957b1dd3edbb33fadfa401ce5815 Mon Sep 17 00:00:00 2001 From: Anand Rajagopal Date: Wed, 5 Mar 2025 20:34:20 +0000 Subject: [PATCH 3/3] Restore 'for' state if sharding is enabled Signed-off-by: Anand Rajagopal --- docs/configuration/config-file-reference.md | 4 ---- go.sum | 2 -- pkg/ruler/compat.go | 2 +- pkg/ruler/ruler.go | 6 ++---- 4 files changed, 3 insertions(+), 11 deletions(-) diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 4c7a599b96e..f517ede562c 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -4855,10 +4855,6 @@ ring: # ruler.enable-ha-evaluation is true. # CLI flag: -ruler.liveness-check-timeout [liveness_check_timeout: <duration> | default = 1s] - -# If true, restores the `for` state of new rule groups added to an existing rule manager.
-# CLI flag: -ruler.always-restore-new-rule-groups -[always_restore_new_rule_groups: <boolean> | default = false] ``` ### `ruler_storage_config` diff --git a/go.sum b/go.sum index 1c884e1c134..b943cc8638d 100644 --- a/go.sum +++ b/go.sum @@ -1571,8 +1571,6 @@ github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.21.0-rc.0 h1:bR+RxBlwcr4q8hXkgSOA/J18j6n0/qH0Gb0DH+8c+RY= -github.com/prometheus/client_golang v1.21.0-rc.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index c154f417772..eb34ee02e3a 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -370,7 +370,7 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi DefaultRuleQueryOffset: func() time.Duration { return overrides.RulerQueryOffset(userID) }, - RestoreNewRuleGroups: cfg.AlwaysRestoreNewRuleGroups, + RestoreNewRuleGroups: cfg.EnableSharding, }), nil } } diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index bdc997d5525..067c7a4f591 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -170,9 +170,8 @@ type Config struct { EnableQueryStats bool `yaml:"query_stats_enabled"` DisableRuleGroupLabel bool `yaml:"disable_rule_group_label"` - EnableHAEvaluation bool `yaml:"enable_ha_evaluation"` - LivenessCheckTimeout time.Duration `yaml:"liveness_check_timeout"` - AlwaysRestoreNewRuleGroups bool `yaml:"always_restore_new_rule_groups"` + EnableHAEvaluation bool `yaml:"enable_ha_evaluation"` + LivenessCheckTimeout time.Duration `yaml:"liveness_check_timeout"` } // Validate config and returns error on failure @@ -255,7 +254,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.EnableHAEvaluation, "ruler.enable-ha-evaluation", false, "Enable high availability") f.DurationVar(&cfg.LivenessCheckTimeout, "ruler.liveness-check-timeout", 1*time.Second, "Timeout duration for non-primary rulers during liveness checks. If the check times out, the non-primary ruler will evaluate the rule group. Applicable when ruler.enable-ha-evaluation is true.") - f.BoolVar(&cfg.AlwaysRestoreNewRuleGroups, "ruler.always-restore-new-rule-groups", false, "When enabled, ruler will always restore the `for` state of new rule groups") cfg.RingCheckPeriod = 5 * time.Second }
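Net effect of the series on the ruler: the `always_restore_new_rule_groups` flag introduced in PATCH 2/3 is removed again, and `RestoreNewRuleGroups` is tied directly to `cfg.EnableSharding`, presumably because a sharded ruler can legitimately receive an already-firing group as a "new" one when ownership moves between replicas, and its `for` state should survive the handoff. A minimal sketch of that final wiring; only the `RestoreNewRuleGroups` and `EnableSharding` field names come from the hunks above, the rest is illustrative:

```go
package main

import "fmt"

// Illustrative stand-ins for the Cortex ruler Config and the manager
// options it feeds; only the two mirrored field names are real.
type rulerConfig struct {
	EnableSharding bool
}

type managerOptions struct {
	// When true, the `for` state of rule groups added to an existing
	// manager is restored instead of starting from zero.
	RestoreNewRuleGroups bool
}

func optionsFor(cfg rulerConfig) managerOptions {
	// After PATCH 3/3: restoration follows sharding, no dedicated flag.
	return managerOptions{RestoreNewRuleGroups: cfg.EnableSharding}
}

func main() {
	fmt.Println(optionsFor(rulerConfig{EnableSharding: true}))  // {true}
	fmt.Println(optionsFor(rulerConfig{EnableSharding: false})) // {false}
}
```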