diff --git a/CHANGELOG.md b/CHANGELOG.md index 7afd1c9e1cb..467045f2fac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ * [CHANGE] Alertmanager: Validating new fields on the PagerDuty AM config. #5290 * [CHANGE] Ingester: Creating label `native-histogram-sample` on the `cortex_discarded_samples_total` to keep track of discarded native histogram samples. #5289 * [CHANGE] Store Gateway: Rename `cortex_bucket_store_cached_postings_compression_time_seconds` to `cortex_bucket_store_cached_postings_compression_time_seconds_total`. #5431 +* [CHANGE] Store Gateway: Rename `cortex_bucket_store_cached_series_fetch_duration_seconds` to `cortex_bucket_store_series_fetch_duration_seconds` and `cortex_bucket_store_cached_postings_fetch_duration_seconds` to `cortex_bucket_store_postings_fetch_duration_seconds`. Add new metric `cortex_bucket_store_chunks_fetch_duration_seconds`. #5448 +* [CHANGE] Store Gateway: Remove `idle_timeout`, `max_conn_age`, `pool_size`, `min_idle_conns` fields for Redis index cache and caching bucket. #5448 * [FEATURE] Store Gateway: Add `max_downloaded_bytes_per_request` to limit max bytes to download per store gateway request. * [FEATURE] Added 2 flags `-alertmanager.alertmanager-client.grpc-max-send-msg-size` and ` -alertmanager.alertmanager-client.grpc-max-recv-msg-size` to configure alert manager grpc client message size limits. #5338 * [FEATURE] Query Frontend: Add `cortex_rejected_queries_total` metric for throttled queries. #5356 diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 37ffa31f07e..8ed2ed7a607 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -601,15 +601,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-get-multi-concurrency @@ -640,16 +631,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. - # Default 0 is to not close aged connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. # CLI flag: -blocks-storage.bucket-store.index-cache.redis.tls-enabled [tls_enabled: | default = false] @@ -760,15 +741,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. 
- # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-get-multi-concurrency @@ -799,16 +771,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. - # Default 0 is to not close aged connections. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.tls-enabled [tls_enabled: | default = false] @@ -938,15 +900,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-get-multi-concurrency @@ -977,16 +930,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. - # Default 0 is to not close aged connections. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.tls-enabled [tls_enabled: | default = false] diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 431a80cb93f..2a0d6d950d6 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -688,15 +688,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. 
# CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-get-multi-concurrency @@ -727,16 +718,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. - # Default 0 is to not close aged connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. # CLI flag: -blocks-storage.bucket-store.index-cache.redis.tls-enabled [tls_enabled: | default = false] @@ -847,15 +828,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-get-multi-concurrency @@ -886,16 +858,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. - # Default 0 is to not close aged connections. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.tls-enabled [tls_enabled: | default = false] @@ -1025,15 +987,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-get-multi-concurrency @@ -1064,16 +1017,6 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. - # Default 0 is to not close aged connections. 
- # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.tls-enabled [tls_enabled: | default = false] diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 5d185a4896c..d5bcea224c6 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -1127,15 +1127,6 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-get-multi-concurrency @@ -1166,16 +1157,6 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. Default - # 0 is to not close aged connections. - # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. # CLI flag: -blocks-storage.bucket-store.index-cache.redis.tls-enabled [tls_enabled: | default = false] @@ -1286,15 +1267,6 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-get-multi-concurrency @@ -1325,16 +1297,6 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. Default - # 0 is to not close aged connections. - # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. 
# CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.tls-enabled [tls_enabled: | default = false] @@ -1463,15 +1425,6 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.master-name [master_name: | default = ""] - # Maximum number of socket connections. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.pool-size - [pool_size: | default = 100] - - # Specifies the minimum number of idle connections, which is useful when - # it is slow to establish new connections. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.min-idle-conns - [min_idle_conns: | default = 10] - # The maximum number of concurrent GetMulti() operations. If set to 0, # concurrency is unlimited. # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-get-multi-concurrency @@ -1502,16 +1455,6 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.write-timeout [write_timeout: | default = 3s] - # Amount of time after which client closes idle connections. Should be - # less than server's timeout. -1 disables idle timeout check. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.idle-timeout - [idle_timeout: | default = 5m] - - # Connection age at which client retires (closes) the connection. Default - # 0 is to not close aged connections. - # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-conn-age - [max_conn_age: | default = 0s] - # Whether to enable tls for redis connection. # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.tls-enabled [tls_enabled: | default = false] diff --git a/go.mod b/go.mod index f1eac07ba46..1fd1be90a2c 100644 --- a/go.mod +++ b/go.mod @@ -43,8 +43,8 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.1-0.20230505130626-263ca5c9438e github.com/prometheus/client_golang v1.15.1 - github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.42.0 + github.com/prometheus/client_model v0.4.0 + github.com/prometheus/common v0.44.0 // Prometheus maps version 2.x.y to tags v0.x.y. 
github.com/prometheus/prometheus v0.44.1-0.20230530154238-dfae954dc113 github.com/segmentio/fasthash v1.0.3 @@ -53,7 +53,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/thanos-io/objstore v0.0.0-20230629211010-ff1b35b9841a github.com/thanos-io/promql-engine v0.0.0-20230526105742-791d78b260ea - github.com/thanos-io/thanos v0.31.1-0.20230627154113-7cfaf3fe2d43 + github.com/thanos-io/thanos v0.31.1-0.20230711160112-df3a5f808726 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.8 @@ -79,6 +79,7 @@ require ( require ( github.com/cespare/xxhash/v2 v2.2.0 github.com/google/go-cmp v0.5.9 + google.golang.org/protobuf v1.30.0 ) require ( @@ -182,7 +183,7 @@ require ( github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.10.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect - github.com/redis/rueidis v1.0.2-go1.18 // indirect + github.com/redis/rueidis v1.0.10-go1.18 // indirect github.com/rs/cors v1.9.0 // indirect github.com/rs/xid v1.5.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect @@ -213,7 +214,7 @@ require ( golang.org/x/crypto v0.9.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/sys v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/tools v0.9.1 // indirect @@ -224,7 +225,6 @@ require ( google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect - google.golang.org/protobuf v1.30.0 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) diff --git a/go.sum b/go.sum index 53485b38d25..37f30b783e7 100644 --- a/go.sum +++ b/go.sum @@ -562,7 +562,7 @@ github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= -github.com/efficientgo/e2e v0.14.1-0.20230530075226-84bb33e062c4 h1:8w29+abukpj2UWN19wZ4xmOswdrYBjfqmbot9oppp3U= +github.com/efficientgo/e2e v0.14.1-0.20230710114240-c316eb95ae5b h1:8VX23BNufsa4KCqnnEonvI3yrou2Pjp8JLcbdVn0Fs8= github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd h1:VaYzzXeUbC5fVheskcKVNOyJMEYD+HgrJNzIAg/mRIM= github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd/go.mod h1:ZV0utlglOczUWv3ih2AbqPSoLoFzdplUYxwV62eZi6Q= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= @@ -1070,8 +1070,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= 
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -1079,8 +1079,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= @@ -1097,8 +1097,8 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/prometheus v0.44.1-0.20230530154238-dfae954dc113 h1:qocj1KtGsgpSnJzxPE//cto7KiqVG4QYlnqijSSkEMU= github.com/prometheus/prometheus v0.44.1-0.20230530154238-dfae954dc113/go.mod h1:5M/uPiGxpn2eVUzaZLRsmOmXgCODYAptUF6NNUNXjww= -github.com/redis/rueidis v1.0.2-go1.18 h1:ZmiZSZY9Htzn/Ri+vZ5o1snD2inOoqKjezypNqwAgKk= -github.com/redis/rueidis v1.0.2-go1.18/go.mod h1:aJiezBQL+bZKAZ+d7YOuj6xKQhrXvEPBiOfotEhG5R8= +github.com/redis/rueidis v1.0.10-go1.18 h1://Xn/ziz4PsLSZjiDIEyZL0UpI5jE3d57U7JOCAfPUM= +github.com/redis/rueidis v1.0.10-go1.18/go.mod h1:897bvXAEc/mol6VWC0HhkwQZ42Z1Wav86aUcxm+7f44= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1164,8 +1164,8 @@ github.com/thanos-io/objstore v0.0.0-20230629211010-ff1b35b9841a h1:tXcVeuval1nz github.com/thanos-io/objstore v0.0.0-20230629211010-ff1b35b9841a/go.mod h1:5V7lzXuaxwt6XFQoA/zJrhdnQrxq1+r0bwQ1iYOq3gM= github.com/thanos-io/promql-engine v0.0.0-20230526105742-791d78b260ea h1:kzK8sBn2+mo3NAxP+XjAjAqr1hwfxxFUy5CybaBkjAI= github.com/thanos-io/promql-engine v0.0.0-20230526105742-791d78b260ea/go.mod h1:eIgPaXWgOhNAv6CPPrgu09r0AtT7byBTZy+7WkX0D18= -github.com/thanos-io/thanos v0.31.1-0.20230627154113-7cfaf3fe2d43 h1:UHyTPGdDHAoNHuSce5cJ2vEi6g1v8D5ZFBWZ61uTHSM= -github.com/thanos-io/thanos v0.31.1-0.20230627154113-7cfaf3fe2d43/go.mod h1:j2SamKKdmtK42m9gXMjrNDLY4gXEy+6FmPCtNwFL1s8= +github.com/thanos-io/thanos v0.31.1-0.20230711160112-df3a5f808726 h1:DcjKUBKKMckA48Eua9H37+lOs13xDUx1PxixIs9hHHo= +github.com/thanos-io/thanos 
v0.31.1-0.20230711160112-df3a5f808726/go.mod h1:bDBl+vJEBXNkMvedh10vjDbvYkPyI6r2JJYJG0lLZTo= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1421,8 +1421,8 @@ golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index c4622e4da7e..ff6d0e5b8a9 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" thanos_testutil "github.com/thanos-io/thanos/pkg/testutil/e2eutil" @@ -458,7 +459,7 @@ func TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( userID := "test-user" bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{userID}, nil) - bucketClient.MockIter(userID+"/", []string{userID + "/01DTVP434PA9VFXSW2JKB3392D", userID + "/01FN6CDF3PNEWWRY5MPGJPE3EX"}, nil) + bucketClient.MockIter(userID+"/", []string{userID + "/01DTVP434PA9VFXSW2JKB3392D/meta.json", userID + "/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter(userID+"/markers/", nil, nil) bucketClient.MockExists(path.Join(userID, cortex_tsdb.TenantDeletionMarkPath), false, nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) @@ -512,8 +513,8 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockExists(path.Join("user-1", cortex_tsdb.TenantDeletionMarkPath), false, nil) bucketClient.MockExists(path.Join("user-2", cortex_tsdb.TenantDeletionMarkPath), false, nil) - bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX"}, nil) - bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ"}, nil) + bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) + bucketClient.MockIter("user-2/", 
[]string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) bucketClient.MockIter("user-2/markers/", nil, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) @@ -767,8 +768,8 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testing.T) { bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockExists(path.Join("user-1", cortex_tsdb.TenantDeletionMarkPath), false, nil) bucketClient.MockExists(path.Join("user-2", cortex_tsdb.TenantDeletionMarkPath), false, nil) - bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX"}, nil) - bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ"}, nil) + bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) + bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) bucketClient.MockIter("user-2/markers/", nil, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) @@ -998,8 +999,8 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockExists(path.Join("user-1", cortex_tsdb.TenantDeletionMarkPath), false, nil) bucketClient.MockExists(path.Join("user-2", cortex_tsdb.TenantDeletionMarkPath), false, nil) - bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX"}, nil) - bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ"}, nil) + bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) + bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) bucketClient.MockIter("user-2/markers/", nil, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) @@ -1203,7 +1204,7 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit // Keys with a value greater than 1 will be groups that should be compacted groupHashes := make(map[uint32]int) for _, userID := range userIDs { - blockDirectory := []string{} + blockFiles := []string{} for blockID, blockTimes := range blocks { blockVisitMarker := BlockVisitMarker{ @@ -1218,14 +1219,15 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit bucketClient.MockGet(userID+"/"+blockID+"/visit-mark.json", string(visitMarkerFileContent), nil) bucketClient.MockGetRequireUpload(userID+"/"+blockID+"/visit-mark.json", string(visitMarkerFileContent), nil) bucketClient.MockUpload(userID+"/"+blockID+"/visit-mark.json", nil) - blockDirectory = append(blockDirectory, userID+"/"+blockID) + // Iter with recursive so expected to get objects rather than directories. 
+ blockFiles = append(blockFiles, path.Join(userID, blockID, block.MetaFilename)) // Get all of the unique group hashes so that they can be used to ensure all groups were compacted groupHash := hashGroup(userID, blockTimes["startTime"], blockTimes["endTime"]) groupHashes[groupHash]++ } - bucketClient.MockIter(userID+"/", blockDirectory, nil) + bucketClient.MockIter(userID+"/", blockFiles, nil) bucketClient.MockIter(userID+"/markers/", nil, nil) bucketClient.MockExists(path.Join(userID, cortex_tsdb.TenantDeletionMarkPath), false, nil) bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) diff --git a/pkg/querier/blocks_finder_bucket_scan_test.go b/pkg/querier/blocks_finder_bucket_scan_test.go index 1756b3db9cc..dd8a6df38a0 100644 --- a/pkg/querier/blocks_finder_bucket_scan_test.go +++ b/pkg/querier/blocks_finder_bucket_scan_test.go @@ -95,7 +95,7 @@ func TestBucketScanBlocksFinder_InitialScanFailure(t *testing.T) { // Mock the storage to simulate a failure when reading objects. bucket.MockIter("", []string{"user-1"}, nil) - bucket.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D"}, nil) + bucket.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json"}, nil) bucket.MockExists(path.Join("user-1", cortex_tsdb.TenantDeletionMarkPath), false, nil) bucket.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "invalid", errors.New("mocked error")) diff --git a/pkg/storage/tsdb/redis_client_config.go b/pkg/storage/tsdb/redis_client_config.go index 9c0937d431a..ecf2804960f 100644 --- a/pkg/storage/tsdb/redis_client_config.go +++ b/pkg/storage/tsdb/redis_client_config.go @@ -17,8 +17,6 @@ type RedisClientConfig struct { DB int `yaml:"db"` MasterName string `yaml:"master_name"` - PoolSize int `yaml:"pool_size"` - MinIdleConns int `yaml:"min_idle_conns"` MaxGetMultiConcurrency int `yaml:"max_get_multi_concurrency"` GetMultiBatchSize int `yaml:"get_multi_batch_size"` MaxSetMultiConcurrency int `yaml:"max_set_multi_concurrency"` @@ -27,8 +25,6 @@ type RedisClientConfig struct { DialTimeout time.Duration `yaml:"dial_timeout"` ReadTimeout time.Duration `yaml:"read_timeout"` WriteTimeout time.Duration `yaml:"write_timeout"` - IdleTimeout time.Duration `yaml:"idle_timeout"` - MaxConnAge time.Duration `yaml:"max_conn_age"` TLSEnabled bool `yaml:"tls_enabled"` TLS tls.ClientConfig `yaml:",inline"` @@ -48,10 +44,6 @@ func (cfg *RedisClientConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix st f.DurationVar(&cfg.DialTimeout, prefix+"dial-timeout", time.Second*5, "Client dial timeout.") f.DurationVar(&cfg.ReadTimeout, prefix+"read-timeout", time.Second*3, "Client read timeout.") f.DurationVar(&cfg.WriteTimeout, prefix+"write-timeout", time.Second*3, "Client write timeout.") - f.DurationVar(&cfg.IdleTimeout, prefix+"idle-timeout", time.Minute*5, "Amount of time after which client closes idle connections. Should be less than server's timeout. -1 disables idle timeout check.") - f.DurationVar(&cfg.MaxConnAge, prefix+"max-conn-age", 0, "Connection age at which client retires (closes) the connection. Default 0 is to not close aged connections.") - f.IntVar(&cfg.PoolSize, prefix+"pool-size", 100, "Maximum number of socket connections.") - f.IntVar(&cfg.MinIdleConns, prefix+"min-idle-conns", 10, "Specifies the minimum number of idle connections, which is useful when it is slow to establish new connections.") f.IntVar(&cfg.MaxGetMultiConcurrency, prefix+"max-get-multi-concurrency", 100, "The maximum number of concurrent GetMulti() operations. 
If set to 0, concurrency is unlimited.") f.IntVar(&cfg.GetMultiBatchSize, prefix+"get-multi-batch-size", 100, "The maximum size per batch for mget.") f.IntVar(&cfg.MaxSetMultiConcurrency, prefix+"max-set-multi-concurrency", 100, "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.") @@ -87,10 +79,6 @@ func (cfg *RedisClientConfig) ToRedisClientConfig() cacheutil.RedisClientConfig DialTimeout: cfg.DialTimeout, ReadTimeout: cfg.ReadTimeout, WriteTimeout: cfg.WriteTimeout, - PoolSize: cfg.PoolSize, - MinIdleConns: cfg.MinIdleConns, - IdleTimeout: cfg.IdleTimeout, - MaxConnAge: cfg.MaxConnAge, MaxGetMultiConcurrency: cfg.MaxGetMultiConcurrency, GetMultiBatchSize: cfg.GetMultiBatchSize, MaxSetMultiConcurrency: cfg.MaxSetMultiConcurrency, diff --git a/pkg/storegateway/bucket_store_metrics.go b/pkg/storegateway/bucket_store_metrics.go index 850ba9d77ac..76bf3e8f883 100644 --- a/pkg/storegateway/bucket_store_metrics.go +++ b/pkg/storegateway/bucket_store_metrics.go @@ -39,6 +39,7 @@ type BucketStoreMetrics struct { seriesFetchDuration *prometheus.Desc postingsFetchDuration *prometheus.Desc + chunkFetchDuration *prometheus.Desc indexHeaderLazyLoadCount *prometheus.Desc indexHeaderLazyLoadFailedCount *prometheus.Desc @@ -147,13 +148,17 @@ func NewBucketStoreMetrics() *BucketStoreMetrics { nil, nil), seriesFetchDuration: prometheus.NewDesc( - "cortex_bucket_store_cached_series_fetch_duration_seconds", + "cortex_bucket_store_series_fetch_duration_seconds", "Time it takes to fetch series to respond a request sent to store-gateway. It includes both the time to fetch it from cache and from storage in case of cache misses.", nil, nil), postingsFetchDuration: prometheus.NewDesc( - "cortex_bucket_store_cached_postings_fetch_duration_seconds", + "cortex_bucket_store_postings_fetch_duration_seconds", "Time it takes to fetch postings to respond a request sent to store-gateway. 
It includes both the time to fetch it from cache and from storage in case of cache misses.", nil, nil), + chunkFetchDuration: prometheus.NewDesc( + "cortex_bucket_store_chunks_fetch_duration_seconds", + "The total time spent fetching chunks within a single request to a store gateway.", + nil, nil), indexHeaderLazyLoadCount: prometheus.NewDesc( "cortex_bucket_store_indexheader_lazy_load_total", @@ -214,6 +219,7 @@ func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.seriesFetchDuration out <- m.postingsFetchDuration + out <- m.chunkFetchDuration out <- m.indexHeaderLazyLoadCount out <- m.indexHeaderLazyLoadFailedCount @@ -253,8 +259,9 @@ func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfCountersWithLabels(out, m.cachedPostingsOriginalSizeBytes, "thanos_bucket_store_cached_postings_original_size_bytes_total") data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressedSizeBytes, "thanos_bucket_store_cached_postings_compressed_size_bytes_total") - data.SendSumOfHistograms(out, m.seriesFetchDuration, "thanos_bucket_store_cached_series_fetch_duration_seconds") - data.SendSumOfHistograms(out, m.postingsFetchDuration, "thanos_bucket_store_cached_postings_fetch_duration_seconds") + data.SendSumOfHistograms(out, m.seriesFetchDuration, "thanos_bucket_store_series_fetch_duration_seconds") + data.SendSumOfHistograms(out, m.postingsFetchDuration, "thanos_bucket_store_postings_fetch_duration_seconds") + data.SendSumOfHistograms(out, m.chunkFetchDuration, "thanos_bucket_store_chunks_fetch_duration_seconds") data.SendSumOfCounters(out, m.indexHeaderLazyLoadCount, "thanos_bucket_store_indexheader_lazy_load_total") data.SendSumOfCounters(out, m.indexHeaderLazyLoadFailedCount, "thanos_bucket_store_indexheader_lazy_load_failed_total") diff --git a/pkg/storegateway/bucket_store_metrics_test.go b/pkg/storegateway/bucket_store_metrics_test.go index 6939324c7ac..04fb3def042 100644 --- a/pkg/storegateway/bucket_store_metrics_test.go +++ b/pkg/storegateway/bucket_store_metrics_test.go @@ -397,47 +397,66 @@ func TestBucketStoreMetrics(t *testing.T) { # TYPE cortex_bucket_store_cached_postings_compressed_size_bytes_total counter cortex_bucket_store_cached_postings_compressed_size_bytes_total 1283583 - # HELP cortex_bucket_store_cached_series_fetch_duration_seconds Time it takes to fetch series to respond a request sent to store-gateway. It includes both the time to fetch it from cache and from storage in case of cache misses. 
- # TYPE cortex_bucket_store_cached_series_fetch_duration_seconds histogram - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="0.001"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="0.01"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="0.1"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="0.3"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="0.6"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="1"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="3"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="6"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="9"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="20"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="30"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="60"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="90"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="120"} 0 - cortex_bucket_store_cached_series_fetch_duration_seconds_bucket{le="+Inf"} 3 - cortex_bucket_store_cached_series_fetch_duration_seconds_sum 1.306102e+06 - cortex_bucket_store_cached_series_fetch_duration_seconds_count 3 + # HELP cortex_bucket_store_series_fetch_duration_seconds Time it takes to fetch series to respond a request sent to store-gateway. It includes both the time to fetch it from cache and from storage in case of cache misses. + # TYPE cortex_bucket_store_series_fetch_duration_seconds histogram + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="0.001"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="0.01"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="0.1"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="0.3"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="0.6"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="1"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="3"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="6"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="9"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="20"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="30"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="60"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="90"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="120"} 0 + cortex_bucket_store_series_fetch_duration_seconds_bucket{le="+Inf"} 3 + cortex_bucket_store_series_fetch_duration_seconds_sum 1.306102e+06 + cortex_bucket_store_series_fetch_duration_seconds_count 3 + # HELP cortex_bucket_store_chunks_fetch_duration_seconds The total time spent fetching chunks within a single request to a store gateway. 
+ # TYPE cortex_bucket_store_chunks_fetch_duration_seconds histogram + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="0.001"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="0.01"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="0.1"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="0.3"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="0.6"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="1"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="3"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="6"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="9"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="20"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="30"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="60"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="90"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="120"} 0 + cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="+Inf"} 3 + cortex_bucket_store_chunks_fetch_duration_seconds_sum 1.328621e+06 + cortex_bucket_store_chunks_fetch_duration_seconds_count 3 # HELP cortex_bucket_store_empty_postings_total Total number of empty postings when fetching block series. # TYPE cortex_bucket_store_empty_postings_total counter cortex_bucket_store_empty_postings_total 112595 - # HELP cortex_bucket_store_cached_postings_fetch_duration_seconds Time it takes to fetch postings to respond a request sent to store-gateway. It includes both the time to fetch it from cache and from storage in case of cache misses. - # TYPE cortex_bucket_store_cached_postings_fetch_duration_seconds histogram - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="0.001"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="0.01"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="0.1"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="0.3"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="0.6"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="1"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="3"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="6"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="9"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="20"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="30"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="60"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="90"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="120"} 0 - cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="+Inf"} 3 - cortex_bucket_store_cached_postings_fetch_duration_seconds_sum 1.328621e+06 - cortex_bucket_store_cached_postings_fetch_duration_seconds_count 3 + # HELP cortex_bucket_store_postings_fetch_duration_seconds Time it takes to fetch postings to respond a request sent to store-gateway. It includes both the time to fetch it from cache and from storage in case of cache misses. 
+ # TYPE cortex_bucket_store_postings_fetch_duration_seconds histogram + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="0.001"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="0.01"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="0.1"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="0.3"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="0.6"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="1"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="3"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="6"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="9"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="20"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="30"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="60"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="90"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="120"} 0 + cortex_bucket_store_postings_fetch_duration_seconds_bucket{le="+Inf"} 3 + cortex_bucket_store_postings_fetch_duration_seconds_sum 1.328621e+06 + cortex_bucket_store_postings_fetch_duration_seconds_count 3 # HELP cortex_bucket_store_indexheader_lazy_load_duration_seconds Duration of the index-header lazy loading in seconds. # TYPE cortex_bucket_store_indexheader_lazy_load_duration_seconds histogram @@ -587,6 +606,7 @@ func populateMockedBucketStoreMetrics(base float64) *prometheus.Registry { m.seriesFetchDuration.Observe(58 * base) m.postingsFetchDuration.Observe(59 * base) + m.chunkFetchDuration.Observe(59 * base) m.indexHeaderLazyLoadCount.Add(60 * base) m.indexHeaderLazyLoadFailedCount.Add(61 * base) @@ -628,6 +648,7 @@ type mockedBucketStoreMetrics struct { seriesFetchDuration prometheus.Histogram postingsFetchDuration prometheus.Histogram + chunkFetchDuration prometheus.Histogram indexHeaderLazyLoadCount prometheus.Counter indexHeaderLazyLoadFailedCount prometheus.Counter @@ -750,15 +771,20 @@ func newMockedBucketStoreMetrics(reg prometheus.Registerer) *mockedBucketStoreMe }) m.seriesFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ - Name: "thanos_bucket_store_cached_series_fetch_duration_seconds", + Name: "thanos_bucket_store_series_fetch_duration_seconds", Help: "Time it takes to fetch series from a bucket to respond a query. It also includes the time it takes to cache fetch and store operations.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) m.postingsFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ - Name: "thanos_bucket_store_cached_postings_fetch_duration_seconds", + Name: "thanos_bucket_store_postings_fetch_duration_seconds", Help: "Time it takes to fetch postings from a bucket to respond a query. 
It also includes the time it takes to cache fetch and store operations.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) + m.chunkFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_chunks_fetch_duration_seconds", + Help: "The total time spent fetching chunks within a single request to a store gateway.", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, + }) m.indexHeaderLazyLoadCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "thanos_bucket_store_indexheader_lazy_load_total", diff --git a/pkg/util/metrics_helper.go b/pkg/util/metrics_helper.go index 649200dd534..532912dd945 100644 --- a/pkg/util/metrics_helper.go +++ b/pkg/util/metrics_helper.go @@ -8,13 +8,12 @@ import ( "strings" "sync" - "github.com/gogo/protobuf/proto" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/prometheus/model/labels" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "google.golang.org/protobuf/proto" util_log "github.com/cortexproject/cortex/pkg/util/log" ) @@ -832,7 +831,7 @@ func (m *MergedMetricFamily) CreateMetricFamily() *dto.MetricFamily { for _, metric := range m.metricMap.metrics { for _, m := range metric { - metrics = append(metrics, &m.metric) + metrics = append(metrics, m.metric) } } @@ -856,7 +855,7 @@ func MergeMetricFamilies(metricFamilies []MetricFamilyMap) (MetricFamilyMap, err } for _, metric := range metricFamily.Metric { - (mergedMap[metricName].metricMap).AddOrSetMetric(*metric, mergeFunc) + (mergedMap[metricName].metricMap).AddOrSetMetric(metric, mergeFunc) } } } @@ -924,7 +923,7 @@ type MetricMap struct { } type Metric struct { - metric dto.Metric + metric *dto.Metric lock sync.Mutex } @@ -936,7 +935,7 @@ func NewMetricMap() MetricMap { // AddOrSetMetric - given a metric, see if there's another metric with the same labels. 
If not, add metric to list // If yes, call mergeFn to merge the two metrics in-place, and updating existing metric -func (m *MetricMap) AddOrSetMetric(metric dto.Metric, mergeFn func(existing *dto.Metric, new *dto.Metric)) { +func (m *MetricMap) AddOrSetMetric(metric *dto.Metric, mergeFn func(existing *dto.Metric, new *dto.Metric)) { var metricLabels []string for _, labelPair := range metric.GetLabel() { metricLabels = append(metricLabels, fmt.Sprintf("%s=%s", labelPair.GetName(), labelPair.GetValue())) @@ -953,7 +952,7 @@ func (m *MetricMap) AddOrSetMetric(metric dto.Metric, mergeFn func(existing *dto same := m.compareLabels(existingMetric.metric.GetLabel(), metric.GetLabel()) if same { existingMetric.lock.Lock() - mergeFn(&existingMetric.metric, &metric) + mergeFn(existingMetric.metric, metric) existingMetric.lock.Unlock() return } diff --git a/pkg/util/metrics_helper_test.go b/pkg/util/metrics_helper_test.go index 3f803005ef8..c6c596e6a11 100644 --- a/pkg/util/metrics_helper_test.go +++ b/pkg/util/metrics_helper_test.go @@ -9,13 +9,13 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) func TestSum(t *testing.T) { diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 35904ea1986..2b5bca4b999 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,25 +1,38 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.20.3 // source: io/prometheus/client/metrics.proto package io_prometheus_client import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type MetricType int32 @@ -38,23 +51,25 @@ const ( MetricType_GAUGE_HISTOGRAM MetricType = 5 ) -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, -} +// Enum value maps for MetricType. +var ( + MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", + } + MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, + } +) func (x MetricType) Enum() *MetricType { p := new(MetricType) @@ -63,449 +78,519 @@ func (x MetricType) Enum() *MetricType { } func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +func (MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() +} + +func (MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_client_metrics_proto_enumTypes[0] +} + +func (x MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MetricType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MetricType(value) + *x = MetricType(num) return nil } +// Deprecated: Use MetricType.Descriptor instead. 
func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) +func (x *LabelPair) Reset() { + *x = LabelPair{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) + +func (x *LabelPair) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) + +func (*LabelPair) ProtoMessage() {} + +func (x *LabelPair) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_LabelPair proto.InternalMessageInfo +// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. 
+func (*LabelPair) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} +} -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *LabelPair) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value +func (x *LabelPair) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value } return "" } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{1} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) +func (x *Gauge) Reset() { + *x = Gauge{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) + +func (x *Gauge) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) + +func (*Gauge) ProtoMessage() {} + +func (x *Gauge) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Gauge proto.InternalMessageInfo +// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
+func (*Gauge) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} +} -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Gauge) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{2} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` } -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Counter proto.InternalMessageInfo +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
+func (*Counter) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} +} -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Counter) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Counter) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{3} + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` } -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) +func (x *Quantile) Reset() { + *x = Quantile{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) + +func (x *Quantile) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) + +func (*Quantile) ProtoMessage() {} + +func (x *Quantile) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Quantile proto.InternalMessageInfo +// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
+func (*Quantile) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} +} -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile +func (x *Quantile) GetQuantile() float64 { + if x != nil && x.Quantile != nil { + return *x.Quantile } return 0 } -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Quantile) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{4} + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` } -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) + +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) + +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Summary proto.InternalMessageInfo +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
+func (*Summary) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} +} -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Summary) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Summary) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile +func (x *Summary) GetQuantile() []*Quantile { + if x != nil { + return x.Quantile } return nil } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{5} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) +func (x *Untyped) Reset() { + *x = Untyped{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) + +func (x *Untyped) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) + +func (*Untyped) ProtoMessage() {} + +func (x *Untyped) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Untyped proto.InternalMessageInfo +// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
+func (*Untyped) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} +} -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Untyped) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Histogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). // In the future, more bucket schemas may be added using numbers < -4 or > 8. Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` - ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. // Negative buckets for the native histogram. NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` // Use either "negative_delta" or "negative_count", the former for // regular histograms with integer counts, the latter for float // histograms. - NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. 
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` - PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. } -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{6} +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Histogram proto.InternalMessageInfo +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. 
+func (*Histogram) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} +} -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Histogram) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Histogram) GetSampleCountFloat() float64 { - if m != nil && m.SampleCountFloat != nil { - return *m.SampleCountFloat +func (x *Histogram) GetSampleCountFloat() float64 { + if x != nil && x.SampleCountFloat != nil { + return *x.SampleCountFloat } return 0 } -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Histogram) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket +func (x *Histogram) GetBucket() []*Bucket { + if x != nil { + return x.Bucket } return nil } -func (m *Histogram) GetSchema() int32 { - if m != nil && m.Schema != nil { - return *m.Schema +func (x *Histogram) GetSchema() int32 { + if x != nil && x.Schema != nil { + return *x.Schema } return 0 } -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil && m.ZeroThreshold != nil { - return *m.ZeroThreshold +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil && x.ZeroThreshold != nil { + return *x.ZeroThreshold } return 0 } -func (m *Histogram) GetZeroCount() uint64 { - if m != nil && m.ZeroCount != nil { - return *m.ZeroCount +func (x *Histogram) GetZeroCount() uint64 { + if x != nil && x.ZeroCount != nil { + return *x.ZeroCount } return 0 } -func (m *Histogram) GetZeroCountFloat() float64 { - if m != nil && m.ZeroCountFloat != nil { - return *m.ZeroCountFloat +func (x *Histogram) GetZeroCountFloat() float64 { + if x != nil && x.ZeroCountFloat != nil { + return *x.ZeroCountFloat } return 0 } -func (m *Histogram) GetNegativeSpan() []*BucketSpan { - if m != nil { - return m.NegativeSpan +func (x *Histogram) GetNegativeSpan() []*BucketSpan { + if x != nil { + return x.NegativeSpan } return nil } -func (m *Histogram) GetNegativeDelta() []int64 { - if m != nil { - return m.NegativeDelta +func (x *Histogram) GetNegativeDelta() []int64 { + if x != nil { + return x.NegativeDelta } return nil } -func (m *Histogram) GetNegativeCount() []float64 { - if m != nil { - return m.NegativeCount +func (x *Histogram) GetNegativeCount() []float64 { + if x != nil { + return x.NegativeCount } return nil } -func (m *Histogram) GetPositiveSpan() []*BucketSpan { - if m != nil { - return m.PositiveSpan +func (x *Histogram) GetPositiveSpan() []*BucketSpan { + if x != nil { + return x.PositiveSpan } return nil } -func (m *Histogram) GetPositiveDelta() []int64 { - if m != nil { - return m.PositiveDelta +func (x *Histogram) GetPositiveDelta() []int64 { + if x != nil { + return x.PositiveDelta } return nil } -func (m *Histogram) GetPositiveCount() []float64 { - if m != nil { - return m.PositiveCount +func (x *Histogram) GetPositiveCount() []float64 { + if x != nil { + return x.PositiveCount } return nil } @@ -513,64 +598,72 @@ func (m *Histogram) GetPositiveCount() []float64 { // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. 
type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{7} +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Bucket proto.InternalMessageInfo +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
+func (*Bucket) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} +} -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount +func (x *Bucket) GetCumulativeCount() uint64 { + if x != nil && x.CumulativeCount != nil { + return *x.CumulativeCount } return 0 } -func (m *Bucket) GetCumulativeCountFloat() float64 { - if m != nil && m.CumulativeCountFloat != nil { - return *m.CumulativeCountFloat +func (x *Bucket) GetCumulativeCountFloat() float64 { + if x != nil && x.CumulativeCountFloat != nil { + return *x.CumulativeCountFloat } return 0 } -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound +func (x *Bucket) GetUpperBound() float64 { + if x != nil && x.UpperBound != nil { + return *x.UpperBound } return 0 } -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Bucket) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } @@ -582,333 +675,658 @@ func (m *Bucket) GetExemplar() *Exemplar { // structured here (with all the buckets in a single array separate // from the Spans). type BucketSpan struct { - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (m *BucketSpan) String() string { return proto.CompactTextString(m) } -func (*BucketSpan) ProtoMessage() {} -func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{8} + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. } -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BucketSpan.Unmarshal(m, b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BucketSpan) XXX_Size() int { - return xxx_messageInfo_BucketSpan.Size(m) + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. 
+func (*BucketSpan) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} +} -func (m *BucketSpan) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset +func (x *BucketSpan) GetOffset() int32 { + if x != nil && x.Offset != nil { + return *x.Offset } return 0 } -func (m *BucketSpan) GetLength() uint32 { - if m != nil && m.Length != nil { - return *m.Length +func (x *BucketSpan) GetLength() uint32 { + if x != nil && x.Length != nil { + return *x.Length } return 0 } type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{9} + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. } -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Exemplar proto.InternalMessageInfo +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
+func (*Exemplar) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} +} -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Exemplar) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Exemplar) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{10} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_Metric proto.InternalMessageInfo +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} +} -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Metric) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge +func (x *Metric) GetGauge() *Gauge { + if x != nil { + return x.Gauge } return nil } -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter +func (x *Metric) GetCounter() *Counter { + if x != nil { + return x.Counter } return nil } -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary +func (x *Metric) GetSummary() *Summary { + if x != nil { + return x.Summary } return nil } -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped +func (x *Metric) GetUntyped() *Untyped { + if x != nil { + return x.Untyped } return nil } -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram +func (x *Metric) GetHistogram() *Histogram { + if x != nil { + return x.Histogram } return nil } -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs +func (x *Metric) GetTimestampMs() int64 { + if x != nil && x.TimestampMs != nil { + return *x.TimestampMs } return 0 } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{11} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` +} + +func (x *MetricFamily) Reset() { + *x = MetricFamily{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) +func (x *MetricFamily) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetricFamily) XXX_DiscardUnknown() { - 
xxx_messageInfo_MetricFamily.DiscardUnknown(m) + +func (*MetricFamily) ProtoMessage() {} + +func (x *MetricFamily) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo +// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. +func (*MetricFamily) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} +} -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *MetricFamily) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help +func (x *MetricFamily) GetHelp() string { + if x != nil && x.Help != nil { + return *x.Help } return "" } -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type +func (x *MetricFamily) GetType() MetricType { + if x != nil && x.Type != nil { + return *x.Type } return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric +func (x *MetricFamily) GetMetric() []*Metric { + if x != nil { + return x.Metric } return nil } -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { - proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) -} - -var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, - 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, - 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, - 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, - 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, - 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, - 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, - 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, - 0xe0, 
0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, - 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, - 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, - 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, - 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, - 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, - 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, - 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, - 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, - 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, - 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, - 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, - 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, - 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, - 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, - 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, - 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, - 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, - 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, - 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, - 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, - 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, - 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, - 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, - 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, - 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, - 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, - 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, - 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, - 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, - 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, - 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, - 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, - 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, - 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, - 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, - 0x0a, 0x95, 0x48, 0x2d, 0x3c, 
0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, - 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, - 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, - 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27, - 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, - 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, - 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, - 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, - 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, - 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, - 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, +var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor + +var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, + 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, + 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6d, 
0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, + 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, + 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, + 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 
0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, + 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, + 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, + 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, + 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, + 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, + 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, + 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 
0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, + 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, + 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, + 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, + 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, + 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, + 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 
0x52, 0x59, + 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, + 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, + 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, + 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, +} + +var ( + file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once + file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc +) + +func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { + file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { + file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) + }) + return file_io_prometheus_client_metrics_proto_rawDescData +} + +var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ + (MetricType)(0), // 0: io.prometheus.client.MetricType + (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair + (*Gauge)(nil), // 2: io.prometheus.client.Gauge + (*Counter)(nil), // 3: io.prometheus.client.Counter + (*Quantile)(nil), // 4: io.prometheus.client.Quantile + (*Summary)(nil), // 5: io.prometheus.client.Summary + (*Untyped)(nil), // 6: io.prometheus.client.Untyped + (*Histogram)(nil), // 7: io.prometheus.client.Histogram + (*Bucket)(nil), // 8: io.prometheus.client.Bucket + (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan + (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar + (*Metric)(nil), // 11: io.prometheus.client.Metric + (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp +} +var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ + 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar + 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 11: io.prometheus.client.Metric.summary:type_name -> 
io.prometheus.client.Summary + 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_io_prometheus_client_metrics_proto_init() } +func file_io_prometheus_client_metrics_proto_init() { + if File_io_prometheus_client_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Quantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Untyped); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricFamily); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_client_metrics_proto_goTypes, + DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, + EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, + MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, + }.Build() + File_io_prometheus_client_metrics_proto = out.File + file_io_prometheus_client_metrics_proto_rawDesc = nil + file_io_prometheus_client_metrics_proto_goTypes = nil + file_io_prometheus_client_metrics_proto_depIdxs = nil } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 73163206419..37aa966748b 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -579,8 +579,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT // No need for a RoundTripper that reloads the CA file automatically. return newRT(tlsConfig) } - - return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, cfg.TLSConfig.CertFile, cfg.TLSConfig.KeyFile, newRT) + return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.roundTripperSettings(), newRT) } type authorizationCredentialsRoundTripper struct { @@ -750,7 +749,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro if len(rt.config.TLSConfig.CAFile) == 0 { t, _ = tlsTransport(tlsConfig) } else { - t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, rt.config.TLSConfig.CertFile, rt.config.TLSConfig.KeyFile, tlsTransport) + t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.roundTripperSettings(), tlsTransport) if err != nil { return nil, err } @@ -817,6 +816,10 @@ func cloneRequest(r *http.Request) *http.Request { // NewTLSConfig creates a new tls.Config from the given TLSConfig. func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { + if err := cfg.Validate(); err != nil { + return nil, err + } + tlsConfig := &tls.Config{ InsecureSkipVerify: cfg.InsecureSkipVerify, MinVersion: uint16(cfg.MinVersion), @@ -831,7 +834,11 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // If a CA cert is provided then let's read it in so we can validate the // scrape target's certificate properly. 
- if len(cfg.CAFile) > 0 { + if len(cfg.CA) > 0 { + if !updateRootCA(tlsConfig, []byte(cfg.CA)) { + return nil, fmt.Errorf("unable to use inline CA cert") + } + } else if len(cfg.CAFile) > 0 { b, err := readCAFile(cfg.CAFile) if err != nil { return nil, err @@ -844,12 +851,9 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { if len(cfg.ServerName) > 0 { tlsConfig.ServerName = cfg.ServerName } + // If a client cert & key is provided then configure TLS config accordingly. - if len(cfg.CertFile) > 0 && len(cfg.KeyFile) == 0 { - return nil, fmt.Errorf("client cert file %q specified without client key file", cfg.CertFile) - } else if len(cfg.KeyFile) > 0 && len(cfg.CertFile) == 0 { - return nil, fmt.Errorf("client key file %q specified without client cert file", cfg.KeyFile) - } else if len(cfg.CertFile) > 0 && len(cfg.KeyFile) > 0 { + if cfg.usingClientCert() && cfg.usingClientKey() { // Verify that client cert and key are valid. if _, err := cfg.getClientCertificate(nil); err != nil { return nil, err @@ -862,6 +866,12 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // TLSConfig configures the options for TLS connections. type TLSConfig struct { + // Text of the CA cert to use for the targets. + CA string `yaml:"ca,omitempty" json:"ca,omitempty"` + // Text of the client cert file for the targets. + Cert string `yaml:"cert,omitempty" json:"cert,omitempty"` + // Text of the client key file for the targets. + Key Secret `yaml:"key,omitempty" json:"key,omitempty"` // The CA cert to use for the targets. CAFile string `yaml:"ca_file,omitempty" json:"ca_file,omitempty"` // The client cert file for the targets. @@ -891,29 +901,77 @@ func (c *TLSConfig) SetDirectory(dir string) { // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { type plain TLSConfig - return unmarshal((*plain)(c)) + if err := unmarshal((*plain)(c)); err != nil { + return err + } + return c.Validate() } -// readCertAndKey reads the cert and key files from the disk. -func readCertAndKey(certFile, keyFile string) ([]byte, []byte, error) { - certData, err := os.ReadFile(certFile) - if err != nil { - return nil, nil, err +// Validate validates the TLSConfig to check that only one of the inlined or +// file-based fields for the TLS CA, client certificate, and client key are +// used. 
+func (c *TLSConfig) Validate() error { + if len(c.CA) > 0 && len(c.CAFile) > 0 { + return fmt.Errorf("at most one of ca and ca_file must be configured") + } + if len(c.Cert) > 0 && len(c.CertFile) > 0 { + return fmt.Errorf("at most one of cert and cert_file must be configured") + } + if len(c.Key) > 0 && len(c.KeyFile) > 0 { + return fmt.Errorf("at most one of key and key_file must be configured") } - keyData, err := os.ReadFile(keyFile) - if err != nil { - return nil, nil, err + if c.usingClientCert() && !c.usingClientKey() { + return fmt.Errorf("exactly one of key or key_file must be configured when a client certificate is configured") + } else if c.usingClientKey() && !c.usingClientCert() { + return fmt.Errorf("exactly one of cert or cert_file must be configured when a client key is configured") } - return certData, keyData, nil + return nil +} + +func (c *TLSConfig) usingClientCert() bool { + return len(c.Cert) > 0 || len(c.CertFile) > 0 +} + +func (c *TLSConfig) usingClientKey() bool { + return len(c.Key) > 0 || len(c.KeyFile) > 0 +} + +func (c *TLSConfig) roundTripperSettings() TLSRoundTripperSettings { + return TLSRoundTripperSettings{ + CA: c.CA, + CAFile: c.CAFile, + Cert: c.Cert, + CertFile: c.CertFile, + Key: string(c.Key), + KeyFile: c.KeyFile, + } } // getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { - certData, keyData, err := readCertAndKey(c.CertFile, c.KeyFile) - if err != nil { - return nil, fmt.Errorf("unable to read specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) + var ( + certData, keyData []byte + err error + ) + + if c.CertFile != "" { + certData, err = os.ReadFile(c.CertFile) + if err != nil { + return nil, fmt.Errorf("unable to read specified client cert (%s): %s", c.CertFile, err) + } + } else { + certData = []byte(c.Cert) + } + + if c.KeyFile != "" { + keyData, err = os.ReadFile(c.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to read specified client key (%s): %s", c.KeyFile, err) + } + } else { + keyData = []byte(c.Key) } cert, err := tls.X509KeyPair(certData, keyData) @@ -946,30 +1004,32 @@ func updateRootCA(cfg *tls.Config, b []byte) bool { // tlsRoundTripper is a RoundTripper that updates automatically its TLS // configuration whenever the content of the CA file changes. type tlsRoundTripper struct { - caFile string - certFile string - keyFile string + settings TLSRoundTripperSettings // newRT returns a new RoundTripper. 
newRT func(*tls.Config) (http.RoundTripper, error) mtx sync.RWMutex rt http.RoundTripper - hashCAFile []byte - hashCertFile []byte - hashKeyFile []byte + hashCAData []byte + hashCertData []byte + hashKeyData []byte tlsConfig *tls.Config } +type TLSRoundTripperSettings struct { + CA, CAFile string + Cert, CertFile string + Key, KeyFile string +} + func NewTLSRoundTripper( cfg *tls.Config, - caFile, certFile, keyFile string, + settings TLSRoundTripperSettings, newRT func(*tls.Config) (http.RoundTripper, error), ) (http.RoundTripper, error) { t := &tlsRoundTripper{ - caFile: caFile, - certFile: certFile, - keyFile: keyFile, + settings: settings, newRT: newRT, tlsConfig: cfg, } @@ -979,7 +1039,7 @@ func NewTLSRoundTripper( return nil, err } t.rt = rt - _, t.hashCAFile, t.hashCertFile, t.hashKeyFile, err = t.getTLSFilesWithHash() + _, t.hashCAData, t.hashCertData, t.hashKeyData, err = t.getTLSDataWithHash() if err != nil { return nil, err } @@ -987,36 +1047,66 @@ func NewTLSRoundTripper( return t, nil } -func (t *tlsRoundTripper) getTLSFilesWithHash() ([]byte, []byte, []byte, []byte, error) { - b1, err := readCAFile(t.caFile) - if err != nil { - return nil, nil, nil, nil, err +func (t *tlsRoundTripper) getTLSDataWithHash() ([]byte, []byte, []byte, []byte, error) { + var ( + caBytes, certBytes, keyBytes []byte + + err error + ) + + if t.settings.CAFile != "" { + caBytes, err = os.ReadFile(t.settings.CAFile) + if err != nil { + return nil, nil, nil, nil, err + } + } else if t.settings.CA != "" { + caBytes = []byte(t.settings.CA) + } + + if t.settings.CertFile != "" { + certBytes, err = os.ReadFile(t.settings.CertFile) + if err != nil { + return nil, nil, nil, nil, err + } + } else if t.settings.Cert != "" { + certBytes = []byte(t.settings.Cert) } - h1 := sha256.Sum256(b1) - var h2, h3 [32]byte - if t.certFile != "" { - b2, b3, err := readCertAndKey(t.certFile, t.keyFile) + if t.settings.KeyFile != "" { + keyBytes, err = os.ReadFile(t.settings.KeyFile) if err != nil { return nil, nil, nil, nil, err } - h2, h3 = sha256.Sum256(b2), sha256.Sum256(b3) + } else if t.settings.Key != "" { + keyBytes = []byte(t.settings.Key) + } + + var caHash, certHash, keyHash [32]byte + + if len(caBytes) > 0 { + caHash = sha256.Sum256(caBytes) + } + if len(certBytes) > 0 { + certHash = sha256.Sum256(certBytes) + } + if len(keyBytes) > 0 { + keyHash = sha256.Sum256(keyBytes) } - return b1, h1[:], h2[:], h3[:], nil + return caBytes, caHash[:], certHash[:], keyHash[:], nil } // RoundTrip implements the http.RoundTrip interface. func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - caData, caHash, certHash, keyHash, err := t.getTLSFilesWithHash() + caData, caHash, certHash, keyHash, err := t.getTLSDataWithHash() if err != nil { return nil, err } t.mtx.RLock() - equal := bytes.Equal(caHash[:], t.hashCAFile) && - bytes.Equal(certHash[:], t.hashCertFile) && - bytes.Equal(keyHash[:], t.hashKeyFile) + equal := bytes.Equal(caHash[:], t.hashCAData) && + bytes.Equal(certHash[:], t.hashCertData) && + bytes.Equal(keyHash[:], t.hashKeyData) rt := t.rt t.mtx.RUnlock() if equal { @@ -1029,7 +1119,7 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { // using GetClientCertificate. 
tlsConfig := t.tlsConfig.Clone() if !updateRootCA(tlsConfig, caData) { - return nil, fmt.Errorf("unable to use specified CA cert %s", t.caFile) + return nil, fmt.Errorf("unable to use specified CA cert %s", t.settings.CAFile) } rt, err = t.newRT(tlsConfig) if err != nil { @@ -1039,9 +1129,9 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { t.mtx.Lock() t.rt = rt - t.hashCAFile = caHash[:] - t.hashCertFile = certHash[:] - t.hashKeyFile = keyHash[:] + t.hashCAData = caHash[:] + t.hashCertData = certHash[:] + t.hashKeyData = keyHash[:] t.mtx.Unlock() return rt.RoundTrip(req) diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index f4fc8845522..90639781513 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -132,7 +132,10 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error { } // Pick off one MetricFamily per Decode until there's nothing left. for key, fam := range d.fams { - *v = *fam + v.Name = fam.Name + v.Help = fam.Help + v.Type = fam.Type + v.Metric = fam.Metric delete(d.fams, key) return nil } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 64dc0eb40c2..7f611ffaad7 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,9 +18,9 @@ import ( "io" "net/http" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/prototext" dto "github.com/prometheus/client_model/go" ) @@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { + if ver == OpenMetricsVersion_1_0_0 { + return FmtOpenMetrics_1_0_0 + } + return FmtOpenMetrics_0_0_1 } } return FmtText @@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case FmtProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + _, err := fmt.Fprintln(w, prototext.Format(v)) return err }, close: func() error { return nil }, @@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { }, close: func() error { return nil }, } - case FmtOpenMetrics: + case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: return encoderCloser{ encode: func(v *dto.MetricFamily) error { _, err := MetricFamilyToOpenMetrics(w, v) diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 0f176fa64f2..c4cb20f0d3e 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,20 +19,22 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. 
const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion_0_0_1 = "0.0.1" + OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ac2482782c7..35db1cc9d73 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -24,8 +24,8 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) // A stateFn is a function that represents a state in a state machine. 
By diff --git a/vendor/github.com/redis/rueidis/.gitignore b/vendor/github.com/redis/rueidis/.gitignore index 6cb4d50ce94..f1db70daef6 100644 --- a/vendor/github.com/redis/rueidis/.gitignore +++ b/vendor/github.com/redis/rueidis/.gitignore @@ -1,3 +1,3 @@ .idea/ dist/ - +vendor/ diff --git a/vendor/github.com/redis/rueidis/README.md b/vendor/github.com/redis/rueidis/README.md index 021b65b18b8..7028dd47a36 100644 --- a/vendor/github.com/redis/rueidis/README.md +++ b/vendor/github.com/redis/rueidis/README.md @@ -1,7 +1,7 @@ # rueidis [![Go Reference](https://pkg.go.dev/badge/github.com/redis/rueidis.svg)](https://pkg.go.dev/github.com/redis/rueidis) -[![Build status](https://badge.buildkite.com/d15fbd91b3b22b55c8d799564f84918a322118ae02590858c4.svg)](https://buildkite.com/rueian/rueidis) +[![CircleCI](https://dl.circleci.com/status-badge/img/gh/redis/rueidis/tree/main.svg?style=shield)](https://dl.circleci.com/status-badge/redirect/gh/redis/rueidis/tree/main) [![Go Report Card](https://goreportcard.com/badge/github.com/redis/rueidis)](https://goreportcard.com/report/github.com/redis/rueidis) [![codecov](https://codecov.io/gh/redis/rueidis/branch/master/graph/badge.svg?token=wGTB8GdY06)](https://codecov.io/gh/redis/rueidis) @@ -31,9 +31,7 @@ import ( ) func main() { - client, err := rueidis.NewClient(rueidis.ClientOption{ - InitAddress: []string{"127.0.0.1:6379"}, - }) + client, err := rueidis.NewClient(rueidis.ClientOption{InitAddress: []string{"127.0.0.1:6379"}}) if err != nil { panic(err) } @@ -42,9 +40,13 @@ func main() { ctx := context.Background() // SET key val NX err = client.Do(ctx, client.B().Set().Key("key").Value("val").Nx().Build()).Error() + // HGETALL hm + hm, err := client.Do(ctx, client.B().Hgetall().Key("hm").Build()).AsStrMap() } ``` +Checkout more examples: [Command Response Cheatsheet](https://github.com/redis/rueidis#command-response-cheatsheet) + ## Developer friendly Command Builder `client.B()` is the builder entrypoint to construct a redis command: @@ -397,6 +399,8 @@ client.Do(ctx, client.B().Lpop().Key("k").Build()).ToString() client.Do(ctx, client.B().Lpop().Key("k").Count(2).Build()).AsStrSlice() // FT.SEARCH client.Do(ctx, client.B().FtSearch().Index("idx").Query("@f:v").Build()).AsFtSearch() +// GEOSEARCH +client.Do(ctx, client.B().Geosearch().Key("k").Fromlonlat(1, 1).Bybox(1).Height(1).Km().Build()).AsGeosearch() ``` ## Supporting Go mod 1.18 @@ -411,5 +415,5 @@ module mymodule go 1.18 -require github.com/redis/rueidis v1.0.0-go1.18 -``` \ No newline at end of file +require github.com/redis/rueidis v1.0.9-go1.18 +``` diff --git a/vendor/github.com/redis/rueidis/client.go b/vendor/github.com/redis/rueidis/client.go index baf60f76797..ab4e4faa9a5 100644 --- a/vendor/github.com/redis/rueidis/client.go +++ b/vendor/github.com/redis/rueidis/client.go @@ -53,7 +53,7 @@ func (c *singleClient) DoMulti(ctx context.Context, multi ...Completed) (resps [ return nil } retry: - resps = c.conn.DoMulti(ctx, multi...) + resps = c.conn.DoMulti(ctx, multi...).s if c.retry && allReadOnly(multi) { for _, resp := range resps { if c.isRetryable(resp.NonRedisError(), ctx) { @@ -74,7 +74,7 @@ func (c *singleClient) DoMultiCache(ctx context.Context, multi ...CacheableTTL) return nil } retry: - resps = c.conn.DoMultiCache(ctx, multi...) 
+ resps = c.conn.DoMultiCache(ctx, multi...).s if c.retry { for _, resp := range resps { if c.isRetryable(resp.NonRedisError(), ctx) { @@ -173,7 +173,7 @@ func (c *dedicatedSingleClient) DoMulti(ctx context.Context, multi ...Completed) retryable = allReadOnly(multi) } retry: - resp = c.wire.DoMulti(ctx, multi...) + resp = c.wire.DoMulti(ctx, multi...).s if retryable && anyRetryable(resp, c.wire, ctx) { goto retry } diff --git a/vendor/github.com/redis/rueidis/cluster.go b/vendor/github.com/redis/rueidis/cluster.go index ee0dd39dda7..353d04ac87b 100644 --- a/vendor/github.com/redis/rueidis/cluster.go +++ b/vendor/github.com/redis/rueidis/cluster.go @@ -3,8 +3,9 @@ package rueidis import ( "context" "errors" - "fmt" + "net" "runtime" + "strconv" "sync" "sync/atomic" "time" @@ -16,6 +17,56 @@ import ( // ErrNoSlot indicates that there is no redis node owns the key slot. var ErrNoSlot = errors.New("the slot has no redis node") +type retry struct { + cIndexes []int + commands []Completed + aIndexes []int + cAskings []Completed +} + +func (r *retry) Capacity() int { + return cap(r.commands) +} + +func (r *retry) ResetLen(n int) { + r.cIndexes = r.cIndexes[:n] + r.commands = r.commands[:n] + r.aIndexes = r.aIndexes[:0] + r.cAskings = r.cAskings[:0] +} + +var retryp = util.NewPool(func(capacity int) *retry { + return &retry{ + cIndexes: make([]int, 0, capacity), + commands: make([]Completed, 0, capacity), + } +}) + +type retrycache struct { + cIndexes []int + commands []CacheableTTL + aIndexes []int + cAskings []CacheableTTL +} + +func (r *retrycache) Capacity() int { + return cap(r.commands) +} + +func (r *retrycache) ResetLen(n int) { + r.cIndexes = r.cIndexes[:n] + r.commands = r.commands[:n] + r.aIndexes = r.aIndexes[:0] + r.cAskings = r.cAskings[:0] +} + +var retrycachep = util.NewPool(func(capacity int) *retrycache { + return &retrycache{ + cIndexes: make([]int, 0, capacity), + commands: make([]CacheableTTL, 0, capacity), + } +}) + type clusterClient struct { slots [16384]conn opt *ClientOption @@ -85,35 +136,45 @@ func (c *clusterClient) refresh() (err error) { return c.sc.Do(c._refresh) } +type clusterslots struct { + reply RedisResult + addr string +} + func (c *clusterClient) _refresh() (err error) { var reply RedisMessage + var addr string -retry: c.mu.RLock() - results := make(chan RedisResult, len(c.conns)) + results := make(chan clusterslots, len(c.conns)) + pending := make([]conn, 0, len(c.conns)) for _, cc := range c.conns { - go func(c conn) { results <- c.Do(context.Background(), cmds.SlotCmd) }(cc) + pending = append(pending, cc) } c.mu.RUnlock() for i := 0; i < cap(results); i++ { - if reply, err = (<-results).ToMessage(); len(reply.values) != 0 { + if i&3 == 0 { // batch CLUSTER SLOTS for every 4 connections + for j := i; j < i+4 && j < len(pending); j++ { + go func(c conn) { + results <- clusterslots{reply: c.Do(context.Background(), cmds.SlotCmd), addr: c.Addr()} + }(pending[j]) + } + } + r := <-results + addr = r.addr + reply, err = r.reply.ToMessage() + if len(reply.values) != 0 { break } } + pending = nil if err != nil { return err } - if len(reply.values) == 0 { - if err = c.init(); err != nil { - return err - } - goto retry - } - - groups := parseSlots(reply) + groups := parseSlots(reply, addr) // TODO support read from replicas conns := make(map[string]conn, len(groups)) @@ -156,12 +217,14 @@ retry: c.conns = conns c.mu.Unlock() - go func(removes []conn) { - time.Sleep(time.Second * 5) - for _, cc := range removes { - cc.Close() - } - }(removes) + if len(removes) > 0 { 
+ go func(removes []conn) { + time.Sleep(time.Second * 5) + for _, cc := range removes { + cc.Close() + } + }(removes) + } return nil } @@ -185,16 +248,34 @@ type group struct { slots [][2]int64 } -func parseSlots(slots RedisMessage) map[string]group { +// parseSlots - map redis slots for each redis nodes/addresses +// defaultAddr is needed in case the node does not know its own IP +func parseSlots(slots RedisMessage, defaultAddr string) map[string]group { groups := make(map[string]group, len(slots.values)) for _, v := range slots.values { - master := fmt.Sprintf("%s:%d", v.values[2].values[0].string, v.values[2].values[1].integer) + var master string + switch v.values[2].values[0].string { + case "": + master = defaultAddr + case "?": + continue + default: + master = net.JoinHostPort(v.values[2].values[0].string, strconv.FormatInt(v.values[2].values[1].integer, 10)) + } g, ok := groups[master] if !ok { g.slots = make([][2]int64, 0) g.nodes = make([]string, 0, len(v.values)-2) for i := 2; i < len(v.values); i++ { - dst := fmt.Sprintf("%s:%d", v.values[i].values[0].string, v.values[i].values[1].integer) + var dst string + switch v.values[i].values[0].string { + case "": + dst = defaultAddr + case "?": + continue + default: + dst = net.JoinHostPort(v.values[i].values[0].string, strconv.FormatInt(v.values[i].values[1].integer, 10)) + } g.nodes = append(g.nodes, dst) } } @@ -280,7 +361,9 @@ process: resp = c.redirectOrNew(addr, cc).Do(ctx, cmd) goto process case RedirectAsk: - resp = c.redirectOrNew(addr, cc).DoMulti(ctx, cmds.AskingCmd, cmd)[1] + results := c.redirectOrNew(addr, cc).DoMulti(ctx, cmds.AskingCmd, cmd) + resp = results.s[1] + resultsp.Put(results) goto process case RedirectRetry: if c.retry && cmd.IsReadOnly() { @@ -291,98 +374,187 @@ process: return resp } -func (c *clusterClient) DoMulti(ctx context.Context, multi ...Completed) (results []RedisResult) { - if len(multi) == 0 { - return nil - } - slots := make(map[uint16]int, 8) +func (c *clusterClient) _pickMulti(multi []Completed) (retries map[conn]*retry, last uint16) { + last = cmds.InitSlot + init := false + + c.mu.RLock() + defer c.mu.RUnlock() + + count := make(map[conn]int, len(c.conns)) for _, cmd := range multi { - slots[cmd.Slot()]++ + if cmd.Slot() == cmds.InitSlot { + init = true + continue + } + p := c.slots[cmd.Slot()] + if p == nil { + return nil, 0 + } + count[p]++ } - if len(slots) > 2 && slots[cmds.InitSlot] > 0 { - panic(panicMixCxSlot) + + retries = make(map[conn]*retry, len(count)) + for cc, n := range count { + retries[cc] = retryp.Get(0, n) } - commands := make(map[uint16][]Completed, len(slots)) - cIndexes := make(map[uint16][]int, len(slots)) - if len(slots) == 2 && slots[cmds.InitSlot] > 0 { - delete(slots, cmds.InitSlot) - for slot := range slots { - commands[slot] = multi + + for i, cmd := range multi { + if cmd.Slot() != cmds.InitSlot { + if last == cmds.InitSlot { + last = cmd.Slot() + } else if init && last != cmd.Slot() { + panic(panicMixCxSlot) + } + cc := c.slots[cmd.Slot()] + re := retries[cc] + re.commands = append(re.commands, cmd) + re.cIndexes = append(re.cIndexes, i) } - } else { - for slot, count := range slots { - cIndexes[slot] = make([]int, 0, count) - commands[slot] = make([]Completed, 0, count) + } + + return retries, last +} + +func (c *clusterClient) pickMulti(multi []Completed) (map[conn]*retry, uint16, error) { + conns, slot := c._pickMulti(multi) + if conns == nil { + if err := c.refresh(); err != nil { + return nil, 0, err + } + if conns, slot = c._pickMulti(multi); conns == 
nil { + return nil, 0, ErrNoSlot } - for i, cmd := range multi { - slot := cmd.Slot() - commands[slot] = append(commands[slot], cmd) - cIndexes[slot] = append(cIndexes[slot], i) + } + return conns, slot, nil +} + +func (c *clusterClient) DoMulti(ctx context.Context, multi ...Completed) []RedisResult { + if len(multi) == 0 { + return nil + } + + retries, slot, err := c.pickMulti(multi) + if err != nil { + return fillErrs(len(multi), err) + } + + if len(retries) <= 1 { + for _, re := range retries { + retryp.Put(re) } + return c.doMulti(ctx, slot, multi) } - results = make([]RedisResult, len(multi)) - util.ParallelKeys(commands, func(slot uint16) { - c.doMulti(ctx, slot, commands[slot], cIndexes[slot], results) - }) + var wg sync.WaitGroup + var mu sync.Mutex + + results := resultsp.Get(len(multi), len(multi)) + respsfn := func(cc conn, cIndexes []int, commands []Completed, resps []RedisResult) { + for i, resp := range resps { + ii := cIndexes[i] + cm := commands[i] + results.s[ii] = resp + addr, mode := c.shouldRefreshRetry(resp.Error(), ctx) + if c.retry && mode != RedirectNone && cm.IsReadOnly() { + nc := cc + if mode != RedirectRetry { + nc = c.redirectOrNew(addr, cc) + } + mu.Lock() + nr := retries[nc] + if nr == nil { + nr = retryp.Get(0, len(commands)) + retries[nc] = nr + } + if mode == RedirectAsk { + nr.aIndexes = append(nr.aIndexes, ii) + nr.cAskings = append(nr.cAskings, cm) + } else { + nr.cIndexes = append(nr.cIndexes, ii) + nr.commands = append(nr.commands, cm) + } + mu.Unlock() + } + } + } + +retry: + wg.Add(len(retries)) + mu.Lock() + for cc, re := range retries { + delete(retries, cc) + go func(cc conn, re *retry) { + if len(re.commands) != 0 { + resps := cc.DoMulti(ctx, re.commands...) + respsfn(cc, re.cIndexes, re.commands, resps.s) + resultsp.Put(resps) + } + if len(re.cAskings) != 0 { + resps := askingMulti(cc, ctx, re.cAskings) + respsfn(cc, re.aIndexes, re.cAskings, resps.s) + resultsp.Put(resps) + } + if ctx.Err() == nil { + retryp.Put(re) + } + wg.Done() + }(cc, re) + } + mu.Unlock() + wg.Wait() + + if len(retries) != 0 { + goto retry + } for i, cmd := range multi { - if results[i].NonRedisError() == nil { + if results.s[i].NonRedisError() == nil { cmds.PutCompleted(cmd) } } - return results + return results.s } -func fillErrs(idx []int, results []RedisResult, err error) { - if idx == nil { - for i := range results { - results[i] = newErrResult(err) - } - } else { - for _, i := range idx { - results[i] = newErrResult(err) - } +func fillErrs(n int, err error) (results []RedisResult) { + results = resultsp.Get(n, n).s + for i := range results { + results[i] = newErrResult(err) } + return results } -func (c *clusterClient) doMulti(ctx context.Context, slot uint16, multi []Completed, idx []int, results []RedisResult) { +func (c *clusterClient) doMulti(ctx context.Context, slot uint16, multi []Completed) []RedisResult { retry: cc, err := c.pick(slot) if err != nil { - fillErrs(idx, results, err) - return + return fillErrs(len(multi), err) } resps := cc.DoMulti(ctx, multi...) process: - for _, resp := range resps { + for _, resp := range resps.s { switch addr, mode := c.shouldRefreshRetry(resp.Error(), ctx); mode { case RedirectMove: if c.retry && allReadOnly(multi) { + resultsp.Put(resps) resps = c.redirectOrNew(addr, cc).DoMulti(ctx, multi...) 
goto process } case RedirectAsk: if c.retry && allReadOnly(multi) { - resps = c.redirectOrNew(addr, cc).DoMulti(ctx, append([]Completed{cmds.AskingCmd}, multi...)...)[1:] + resultsp.Put(resps) + resps = askingMulti(c.redirectOrNew(addr, cc), ctx, multi) goto process } case RedirectRetry: if c.retry && allReadOnly(multi) { + resultsp.Put(resps) runtime.Gosched() goto retry } } } - if idx == nil { - for i, res := range resps { - results[i] = res - } - } else { - for i, res := range resps { - results[idx[i]] = res - } - } + return resps.s } func (c *clusterClient) doCache(ctx context.Context, cmd Cacheable, ttl time.Duration) (resp RedisResult) { @@ -398,8 +570,9 @@ process: resp = c.redirectOrNew(addr, cc).DoCache(ctx, cmd, ttl) goto process case RedirectAsk: - // TODO ASKING OPT-IN Caching - resp = c.redirectOrNew(addr, cc).DoMulti(ctx, cmds.AskingCmd, Completed(cmd))[1] + results := askingMultiCache(c.redirectOrNew(addr, cc), ctx, []CacheableTTL{CT(cmd, ttl)}) + resp = results.s[0] + resultsp.Put(results) goto process case RedirectRetry: if c.retry { @@ -418,90 +591,156 @@ func (c *clusterClient) DoCache(ctx context.Context, cmd Cacheable, ttl time.Dur return resp } -func (c *clusterClient) doMultiCache(ctx context.Context, slot uint16, multi []CacheableTTL, idx []int, results []RedisResult) { -retry: - cc, err := c.pick(slot) - if err != nil { - fillErrs(idx, results, err) - return +func askingMulti(cc conn, ctx context.Context, multi []Completed) *redisresults { + commands := make([]Completed, 0, len(multi)*2) + for _, cmd := range multi { + commands = append(commands, cmds.AskingCmd, cmd) } - resps := cc.DoMultiCache(ctx, multi...) -process: - for _, resp := range resps { - switch addr, mode := c.shouldRefreshRetry(resp.Error(), ctx); mode { - case RedirectMove: - resps = c.redirectOrNew(addr, cc).DoMultiCache(ctx, multi...) - goto process - case RedirectAsk: - commands := make([]Completed, 0, len(multi)+3) - commands = append(commands, cmds.AskingCmd, cmds.MultiCmd) - for _, cmd := range multi { - commands = append(commands, Completed(cmd.Cmd)) - } - commands = append(commands, cmds.ExecCmd) - if asked, err := c.redirectOrNew(addr, cc).DoMulti(ctx, commands...)[len(commands)-1].ToArray(); err != nil { - for i := range resps { - resps[i] = newErrResult(err) - } - } else { - for i, ret := range asked { - resps[i] = newResult(ret, nil) - } - } - goto process - case RedirectRetry: - if c.retry { - runtime.Gosched() - goto retry - } + results := resultsp.Get(0, len(multi)) + resps := cc.DoMulti(ctx, commands...) + for i := 1; i < len(resps.s); i += 2 { + results.s = append(results.s, resps.s[i]) + } + resultsp.Put(resps) + return results +} + +func askingMultiCache(cc conn, ctx context.Context, multi []CacheableTTL) *redisresults { + commands := make([]Completed, 0, len(multi)*6) + for _, cmd := range multi { + ck, _ := cmds.CacheKey(cmd.Cmd) + commands = append(commands, cmds.OptInCmd, cmds.MultiCmd, cmds.AskingCmd, cmds.NewCompleted([]string{"PTTL", ck}), Completed(cmd.Cmd), cmds.ExecCmd) + } + results := resultsp.Get(0, len(multi)) + resps := cc.DoMulti(ctx, commands...) 
+ for i := 5; i < len(resps.s); i += 6 { + if arr, err := resps.s[i].ToArray(); err != nil { + results.s = append(results.s, newErrResult(err)) + } else { + results.s = append(results.s, newResult(arr[len(arr)-1], nil)) } } - if idx == nil { - copy(results, resps) - } else { - for i, resp := range resps { - results[idx[i]] = resp + resultsp.Put(resps) + return results +} + +func (c *clusterClient) _pickMultiCache(multi []CacheableTTL) map[conn]*retrycache { + c.mu.RLock() + defer c.mu.RUnlock() + + count := make(map[conn]int, len(c.conns)) + for _, cmd := range multi { + p := c.slots[cmd.Cmd.Slot()] + if p == nil { + return nil + } + count[p]++ + } + + retries := make(map[conn]*retrycache, len(count)) + for cc, n := range count { + retries[cc] = retrycachep.Get(0, n) + } + + for i, cmd := range multi { + cc := c.slots[cmd.Cmd.Slot()] + re := retries[cc] + re.commands = append(re.commands, cmd) + re.cIndexes = append(re.cIndexes, i) + } + + return retries +} + +func (c *clusterClient) pickMultiCache(multi []CacheableTTL) (map[conn]*retrycache, error) { + conns := c._pickMultiCache(multi) + if conns == nil { + if err := c.refresh(); err != nil { + return nil, err + } + if conns = c._pickMultiCache(multi); conns == nil { + return nil, ErrNoSlot } } + return conns, nil } -func (c *clusterClient) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (results []RedisResult) { +func (c *clusterClient) DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisResult { if len(multi) == 0 { return nil } - slots := make(map[uint16]int, 8) - for _, cmd := range multi { - slots[cmd.Cmd.Slot()]++ + + retries, err := c.pickMultiCache(multi) + if err != nil { + return fillErrs(len(multi), err) } - commands := make(map[uint16][]CacheableTTL, len(slots)) - cIndexes := make(map[uint16][]int, len(slots)) - if len(slots) == 1 { - for slot := range slots { - commands[slot] = multi - } - } else { - for slot, count := range slots { - cIndexes[slot] = make([]int, 0, count) - commands[slot] = make([]CacheableTTL, 0, count) - } - for i, cmd := range multi { - slot := cmd.Cmd.Slot() - commands[slot] = append(commands[slot], cmd) - cIndexes[slot] = append(cIndexes[slot], i) + + var wg sync.WaitGroup + var mu sync.Mutex + + results := resultsp.Get(len(multi), len(multi)) + respsfn := func(cc conn, cIndexes []int, commands []CacheableTTL, resps []RedisResult) { + for i, resp := range resps { + ii := cIndexes[i] + cm := commands[i] + results.s[ii] = resp + addr, mode := c.shouldRefreshRetry(resp.Error(), ctx) + if c.retry && mode != RedirectNone { + nc := cc + if mode != RedirectRetry { + nc = c.redirectOrNew(addr, cc) + } + mu.Lock() + nr := retries[nc] + if nr == nil { + nr = retrycachep.Get(0, len(commands)) + retries[nc] = nr + } + if mode == RedirectAsk { + nr.aIndexes = append(nr.aIndexes, ii) + nr.cAskings = append(nr.cAskings, cm) + } else { + nr.cIndexes = append(nr.cIndexes, ii) + nr.commands = append(nr.commands, cm) + } + mu.Unlock() + } } } - results = make([]RedisResult, len(multi)) - util.ParallelKeys(commands, func(slot uint16) { - c.doMultiCache(ctx, slot, commands[slot], cIndexes[slot], results) - }) +retry: + wg.Add(len(retries)) + mu.Lock() + for cc, re := range retries { + delete(retries, cc) + go func(cc conn, re *retrycache) { + if len(re.commands) != 0 { + resps := cc.DoMultiCache(ctx, re.commands...) 
+ respsfn(cc, re.cIndexes, re.commands, resps.s) + resultsp.Put(resps) + } + if len(re.cAskings) != 0 { + resps := askingMultiCache(cc, ctx, re.cAskings) + respsfn(cc, re.aIndexes, re.cAskings, resps.s) + resultsp.Put(resps) + } + retrycachep.Put(re) + wg.Done() + }(cc, re) + } + mu.Unlock() + wg.Wait() + + if len(retries) != 0 { + goto retry + } for i, cmd := range multi { - if err := results[i].NonRedisError(); err == nil || err == ErrDoCacheAborted { + if err := results.s[i].NonRedisError(); err == nil || err == ErrDoCacheAborted { cmds.PutCacheable(cmd.Cmd) } } - return results + return results.s } func (c *clusterClient) Receive(ctx context.Context, subscribe Completed, fn func(msg PubSubMessage)) (err error) { @@ -563,15 +802,12 @@ func (c *clusterClient) shouldRefreshRetry(err error, ctx context.Context) (addr } else if err.IsClusterDown() || err.IsTryAgain() { mode = RedirectRetry } - } else { + } else if ctx.Err() == nil { mode = RedirectRetry } if mode != RedirectNone { go c.refresh() } - if mode == RedirectRetry && ctx.Err() != nil { - mode = RedirectNone - } } return } @@ -675,7 +911,7 @@ func (c *dedicatedClusterClient) DoMulti(ctx context.Context, multi ...Completed } retry: if w, err := c.acquire(slot); err == nil { - resp = w.DoMulti(ctx, multi...) + resp = w.DoMulti(ctx, multi...).s for _, r := range resp { _, mode := c.client.shouldRefreshRetry(r.Error(), ctx) if mode == RedirectRetry && retryable && w.Error() == nil { @@ -687,7 +923,7 @@ retry: } } } else { - resp = make([]RedisResult, len(multi)) + resp = resultsp.Get(len(multi), len(multi)).s for i := range resp { resp[i] = newErrResult(err) } diff --git a/vendor/github.com/redis/rueidis/docker-compose.yml b/vendor/github.com/redis/rueidis/docker-compose.yml index 8e7e373e096..083a1a86ac2 100644 --- a/vendor/github.com/redis/rueidis/docker-compose.yml +++ b/vendor/github.com/redis/rueidis/docker-compose.yml @@ -18,7 +18,7 @@ services: ports: - "6344:6379" dragonflydb: - image: docker.dragonflydb.io/dragonflydb/dragonfly:v1.1.2 + image: docker.dragonflydb.io/dragonflydb/dragonfly:v1.4.0 ports: - "6333:6379" kvrocks: diff --git a/vendor/github.com/redis/rueidis/dockertest.sh b/vendor/github.com/redis/rueidis/dockertest.sh index 0097c781e87..68273a1be18 100644 --- a/vendor/github.com/redis/rueidis/dockertest.sh +++ b/vendor/github.com/redis/rueidis/dockertest.sh @@ -2,11 +2,7 @@ set -ev +trap "docker-compose down -v" EXIT docker-compose up -d -go test -coverprofile=./c.out -v -race -timeout 30m ./... -docker-compose down -v - -if [ ! 
-z "$CODECOV_TOKEN" ]; then - cp c.out coverage.txt - bash <(curl -s https://codecov.io/bash) -fi \ No newline at end of file +go install gotest.tools/gotestsum@v1.10.0 +gotestsum --format standard-verbose --junitfile unit-tests.xml -- -coverprofile=coverage.out -race -timeout 15m "$@" diff --git a/vendor/github.com/redis/rueidis/helper.go b/vendor/github.com/redis/rueidis/helper.go index 475f4e5525c..4abdb767a61 100644 --- a/vendor/github.com/redis/rueidis/helper.go +++ b/vendor/github.com/redis/rueidis/helper.go @@ -3,25 +3,24 @@ package rueidis import ( "context" "errors" - "sync" "time" - "github.com/redis/rueidis/internal/cmds" - "github.com/redis/rueidis/internal/util" + intl "github.com/redis/rueidis/internal/cmds" ) -// MGetCache is a helper that consults the client-side caches with multiple keys by grouping keys within same slot into MGETs +// MGetCache is a helper that consults the client-side caches with multiple keys by grouping keys within same slot into multiple GETs func MGetCache(client Client, ctx context.Context, ttl time.Duration, keys []string) (ret map[string]RedisMessage, err error) { if len(keys) == 0 { return make(map[string]RedisMessage), nil } - if _, ok := client.(*singleClient); ok { - return clientMGetCache(client, ctx, ttl, client.B().Mget().Key(keys...).Cache(), keys) + cmds := make([]CacheableTTL, len(keys)) + for i := range cmds { + cmds[i] = CT(client.B().Get().Key(keys[i]).Cache(), ttl) } - return parallelMGetCache(client, ctx, ttl, cmds.MGets(keys), keys) + return doMultiCache(client, ctx, cmds, keys) } -// MGet is a helper that consults the redis directly with multiple keys by grouping keys within same slot into MGETs +// MGet is a helper that consults the redis directly with multiple keys by grouping keys within same slot into MGET or multiple GETs func MGet(client Client, ctx context.Context, keys []string) (ret map[string]RedisMessage, err error) { if len(keys) == 0 { return make(map[string]RedisMessage), nil @@ -29,10 +28,14 @@ func MGet(client Client, ctx context.Context, keys []string) (ret map[string]Red if _, ok := client.(*singleClient); ok { return clientMGet(client, ctx, client.B().Mget().Key(keys...).Build(), keys) } - return parallelMGet(client, ctx, cmds.MGets(keys), keys) + cmds := make([]Completed, len(keys)) + for i := range cmds { + cmds[i] = client.B().Get().Key(keys[i]).Build() + } + return doMultiGet(client, ctx, cmds, keys) } -// MSet is a helper that consults the redis directly with multiple keys by grouping keys within same slot into MSETs +// MSet is a helper that consults the redis directly with multiple keys by grouping keys within same slot into MSETs or multiple SETs func MSet(client Client, ctx context.Context, kvs map[string]string) map[string]error { if len(kvs) == 0 { return make(map[string]error) @@ -40,10 +43,31 @@ func MSet(client Client, ctx context.Context, kvs map[string]string) map[string] if _, ok := client.(*singleClient); ok { return clientMSet(client, ctx, "MSET", kvs, make(map[string]error, len(kvs))) } - return parallelMSet(client, ctx, cmds.MSets(kvs), make(map[string]error, len(kvs))) + cmds := make([]Completed, 0, len(kvs)) + keys := make([]string, 0, len(kvs)) + for k, v := range kvs { + cmds = append(cmds, client.B().Set().Key(k).Value(v).Build()) + keys = append(keys, k) + } + return doMultiSet(client, ctx, cmds, keys) +} + +// MDel is a helper that consults the redis directly with multiple keys by grouping keys within same slot into DELs +func MDel(client Client, ctx context.Context, keys []string) 
map[string]error { + if len(keys) == 0 { + return make(map[string]error) + } + if _, ok := client.(*singleClient); ok { + return clientMDel(client, ctx, keys) + } + cmds := make([]Completed, len(keys)) + for i, k := range keys { + cmds[i] = client.B().Del().Key(k).Build() + } + return doMultiSet(client, ctx, cmds, keys) } -// MSetNX is a helper that consults the redis directly with multiple keys by grouping keys within same slot into MSETNXs +// MSetNX is a helper that consults the redis directly with multiple keys by grouping keys within same slot into MSETNXs or multiple SETNXs func MSetNX(client Client, ctx context.Context, kvs map[string]string) map[string]error { if len(kvs) == 0 { return make(map[string]error) @@ -51,21 +75,28 @@ func MSetNX(client Client, ctx context.Context, kvs map[string]string) map[strin if _, ok := client.(*singleClient); ok { return clientMSet(client, ctx, "MSETNX", kvs, make(map[string]error, len(kvs))) } - return parallelMSet(client, ctx, cmds.MSetNXs(kvs), make(map[string]error, len(kvs))) + cmds := make([]Completed, 0, len(kvs)) + keys := make([]string, 0, len(kvs)) + for k, v := range kvs { + cmds = append(cmds, client.B().Set().Key(k).Value(v).Nx().Build()) + keys = append(keys, k) + } + return doMultiSet(client, ctx, cmds, keys) } -// JsonMGetCache is a helper that consults the client-side caches with multiple keys by grouping keys within same slot into JSON.MGETs +// JsonMGetCache is a helper that consults the client-side caches with multiple keys by grouping keys within same slot into multiple JSON.GETs func JsonMGetCache(client Client, ctx context.Context, ttl time.Duration, keys []string, path string) (ret map[string]RedisMessage, err error) { if len(keys) == 0 { return make(map[string]RedisMessage), nil } - if _, ok := client.(*singleClient); ok { - return clientMGetCache(client, ctx, ttl, client.B().JsonMget().Key(keys...).Path(path).Cache(), keys) + cmds := make([]CacheableTTL, len(keys)) + for i := range cmds { + cmds[i] = CT(client.B().JsonGet().Key(keys[i]).Path(path).Cache(), ttl) } - return parallelMGetCache(client, ctx, ttl, cmds.JsonMGets(keys, path), keys) + return doMultiCache(client, ctx, cmds, keys) } -// JsonMGet is a helper that consults redis directly with multiple keys by grouping keys within same slot into JSON.MGETs +// JsonMGet is a helper that consults redis directly with multiple keys by grouping keys within same slot into JSON.MGETs or multiple JSON.GETs func JsonMGet(client Client, ctx context.Context, keys []string, path string) (ret map[string]RedisMessage, err error) { if len(keys) == 0 { return make(map[string]RedisMessage), nil @@ -73,15 +104,28 @@ func JsonMGet(client Client, ctx context.Context, keys []string, path string) (r if _, ok := client.(*singleClient); ok { return clientMGet(client, ctx, client.B().JsonMget().Key(keys...).Path(path).Build(), keys) } - return parallelMGet(client, ctx, cmds.JsonMGets(keys, path), keys) + cmds := make([]Completed, len(keys)) + for i := range cmds { + cmds[i] = client.B().JsonGet().Key(keys[i]).Path(path).Build() + } + return doMultiGet(client, ctx, cmds, keys) } -func clientMGetCache(client Client, ctx context.Context, ttl time.Duration, cmd Cacheable, keys []string) (ret map[string]RedisMessage, err error) { - arr, err := client.DoCache(ctx, cmd, ttl).ToArray() - if err != nil { - return nil, err +// JsonMSet is a helper that consults redis directly with multiple keys by grouping keys within same slot into JSON.MSETs or multiple JOSN.SETs +func JsonMSet(client Client, ctx 
context.Context, kvs map[string]string, path string) map[string]error { + if len(kvs) == 0 { + return make(map[string]error) } - return arrayToKV(make(map[string]RedisMessage, len(keys)), arr, keys), nil + if _, ok := client.(*singleClient); ok { + return clientJSONMSet(client, ctx, kvs, path, make(map[string]error, len(kvs))) + } + cmds := make([]Completed, 0, len(kvs)) + keys := make([]string, 0, len(kvs)) + for k, v := range kvs { + cmds = append(cmds, client.B().JsonSet().Key(k).Path(path).Value(v).Build()) + keys = append(keys, k) + } + return doMultiSet(client, ctx, cmds, keys) } func clientMGet(client Client, ctx context.Context, cmd Completed, keys []string) (ret map[string]RedisMessage, err error) { @@ -107,63 +151,61 @@ func clientMSet(client Client, ctx context.Context, mset string, kvs map[string] return ret } -func parallelMGetCache(cc Client, ctx context.Context, ttl time.Duration, mgets map[uint16]Completed, keys []string) (ret map[string]RedisMessage, err error) { - return doMGets(make(map[string]RedisMessage, len(keys)), mgets, func(cmd Completed) RedisResult { - return cc.DoCache(ctx, Cacheable(cmd), ttl) - }) -} - -func parallelMGet(cc Client, ctx context.Context, mgets map[uint16]Completed, keys []string) (ret map[string]RedisMessage, err error) { - return doMGets(make(map[string]RedisMessage, len(keys)), mgets, func(cmd Completed) RedisResult { - return cc.Do(ctx, cmd) - }) +func clientJSONMSet(client Client, ctx context.Context, kvs map[string]string, path string, ret map[string]error) map[string]error { + cmd := intl.JsonMsetTripletValue(client.B().JsonMset()) + for k, v := range kvs { + cmd = cmd.Key(k).Path(path).Value(v) + } + err := client.Do(ctx, cmd.Build()).Error() + for k := range kvs { + ret[k] = err + } + return ret } -func parallelMSet(cc Client, ctx context.Context, msets map[uint16]Completed, ret map[string]error) map[string]error { - var mu sync.Mutex - for _, cmd := range msets { - cmd.Pin() +func clientMDel(client Client, ctx context.Context, keys []string) map[string]error { + err := client.Do(ctx, client.B().Del().Key(keys...).Build()).Error() + ret := make(map[string]error, len(keys)) + for _, k := range keys { + ret[k] = err } - util.ParallelVals(msets, func(cmd Completed) { - ok, err := cc.Do(ctx, cmd).AsBool() - err2 := err - if err2 == nil && !ok { - err2 = ErrMSetNXNotSet - } - mu.Lock() - for i := 1; i < len(cmd.Commands()); i += 2 { - ret[cmd.Commands()[i]] = err2 - } - mu.Unlock() - if err == nil { - cmds.Put(cmds.CompletedCS(cmd)) - } - }) return ret } -func doMGets(m map[string]RedisMessage, mgets map[uint16]Completed, fn func(cmd Completed) RedisResult) (ret map[string]RedisMessage, err error) { - var mu sync.Mutex - for _, cmd := range mgets { - cmd.Pin() +func doMultiCache(cc Client, ctx context.Context, cmds []CacheableTTL, keys []string) (ret map[string]RedisMessage, err error) { + ret = make(map[string]RedisMessage, len(keys)) + resps := cc.DoMultiCache(ctx, cmds...) 
+ defer resultsp.Put(&redisresults{s: resps}) + for i, resp := range resps { + if err := resp.NonRedisError(); err != nil { + return nil, err + } + ret[keys[i]] = resp.val } - util.ParallelVals(mgets, func(cmd Completed) { - arr, err2 := fn(cmd).ToArray() - mu.Lock() - if err2 != nil { - err = err2 - } else { - arrayToKV(m, arr, cmd.Commands()[1:]) + return ret, nil +} + +func doMultiGet(cc Client, ctx context.Context, cmds []Completed, keys []string) (ret map[string]RedisMessage, err error) { + ret = make(map[string]RedisMessage, len(keys)) + resps := cc.DoMulti(ctx, cmds...) + defer resultsp.Put(&redisresults{s: resps}) + for i, resp := range resps { + if err := resp.NonRedisError(); err != nil { + return nil, err } - mu.Unlock() - }) - if err != nil { - return nil, err + ret[keys[i]] = resp.val } - for _, cmd := range mgets { - cmds.Put(cmds.CompletedCS(cmd)) + return ret, nil +} + +func doMultiSet(cc Client, ctx context.Context, cmds []Completed, keys []string) (ret map[string]error) { + ret = make(map[string]error, len(keys)) + resps := cc.DoMulti(ctx, cmds...) + for i, resp := range resps { + ret[keys[i]] = resp.Error() } - return m, nil + resultsp.Put(&redisresults{s: resps}) + return ret } func arrayToKV(m map[string]RedisMessage, arr []RedisMessage, keys []string) map[string]RedisMessage { diff --git a/vendor/github.com/redis/rueidis/internal/cmds/cmds.go b/vendor/github.com/redis/rueidis/internal/cmds/cmds.go index d5bad54d138..36fe3531eee 100644 --- a/vendor/github.com/redis/rueidis/internal/cmds/cmds.go +++ b/vendor/github.com/redis/rueidis/internal/cmds/cmds.go @@ -66,7 +66,12 @@ var ( } // SentinelSubscribe is predefined SUBSCRIBE ASKING SentinelSubscribe = Completed{ - cs: newCommandSlice([]string{"SUBSCRIBE", "+sentinel", "+switch-master", "+reboot"}), + cs: newCommandSlice([]string{"SUBSCRIBE", "+sentinel", "+slave", "-sdown", "+sdown", "+switch-master", "+reboot"}), + cf: noRetTag, + } + // SentinelUnSubscribe is predefined UNSUBSCRIBE ASKING + SentinelUnSubscribe = Completed{ + cs: newCommandSlice([]string{"UNSUBSCRIBE", "+sentinel", "+slave", "-sdown", "+sdown", "+switch-master", "+reboot"}), cf: noRetTag, } ) @@ -237,7 +242,12 @@ func NewMGetCompleted(ss []string) Completed { // MGets groups keys by their slot and returns multi MGET commands func MGets(keys []string) map[uint16]Completed { - return slotMGets("MGET", keys) + return slotMCMDs("MGET", keys, mtGetTag) +} + +// MDels groups keys by their slot and returns multi DEL commands +func MDels(keys []string) map[uint16]Completed { + return slotMCMDs("DEL", keys, 0) } // MSets groups keys by their slot and returns multi MSET commands @@ -252,7 +262,7 @@ func MSetNXs(kvs map[string]string) map[uint16]Completed { // JsonMGets groups keys by their slot and returns multi JSON.MGET commands func JsonMGets(keys []string, path string) map[uint16]Completed { - ret := slotMGets("JSON.MGET", keys) + ret := slotMCMDs("JSON.MGET", keys, mtGetTag) for _, jsonmget := range ret { jsonmget.cs.s = append(jsonmget.cs.s, path) jsonmget.cs.l++ @@ -260,7 +270,27 @@ func JsonMGets(keys []string, path string) map[uint16]Completed { return ret } -func slotMGets(cmd string, keys []string) map[uint16]Completed { +// JsonMSets groups keys by their slot and returns multi JSON.MSET commands +func JsonMSets(kvs map[string]string, path string) map[uint16]Completed { + ret := make(map[uint16]Completed, 8) + for key, value := range kvs { + var cs *CommandSlice + ks := slot(key) + if cp, ok := ret[ks]; ok { + cs = cp.cs + } else { + cs = get() + cs.s 
= append(cs.s, "JSON.MSET") + cs.l = 1 + ret[ks] = Completed{cs: cs, ks: ks} + } + cs.s = append(cs.s, key, path, value) + cs.l += 3 + } + return ret +} + +func slotMCMDs(cmd string, keys []string, cf uint16) map[uint16]Completed { ret := make(map[uint16]Completed, 8) for _, key := range keys { var cs *CommandSlice @@ -271,7 +301,7 @@ func slotMGets(cmd string, keys []string) map[uint16]Completed { cs = get() cs.s = append(cs.s, cmd) cs.l = 1 - ret[ks] = Completed{cs: cs, cf: mtGetTag, ks: ks} + ret[ks] = Completed{cs: cs, cf: cf, ks: ks} } cs.s = append(cs.s, key) cs.l++ diff --git a/vendor/github.com/redis/rueidis/internal/cmds/gen_graph.go b/vendor/github.com/redis/rueidis/internal/cmds/gen_graph.go index 57df3a854ae..88f6492c051 100644 --- a/vendor/github.com/redis/rueidis/internal/cmds/gen_graph.go +++ b/vendor/github.com/redis/rueidis/internal/cmds/gen_graph.go @@ -51,6 +51,32 @@ func (c GraphConfigSetValue) Build() Completed { return Completed(c) } +type GraphConstraintCreate Completed + +func (b Builder) GraphConstraintCreate() (c GraphConstraintCreate) { + c = GraphConstraintCreate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GRAPH.CONSTRAINT", "CREATE") + return c +} + +func (c GraphConstraintCreate) Build() Completed { + c.cs.Build() + return Completed(c) +} + +type GraphConstraintDrop Completed + +func (b Builder) GraphConstraintDrop() (c GraphConstraintDrop) { + c = GraphConstraintDrop{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GRAPH.CONSTRAINT", "DROP") + return c +} + +func (c GraphConstraintDrop) Build() Completed { + c.cs.Build() + return Completed(c) +} + type GraphDelete Completed func (b Builder) GraphDelete() (c GraphDelete) { diff --git a/vendor/github.com/redis/rueidis/internal/cmds/gen_json.go b/vendor/github.com/redis/rueidis/internal/cmds/gen_json.go index 84eb7d80236..728ffccfce5 100644 --- a/vendor/github.com/redis/rueidis/internal/cmds/gen_json.go +++ b/vendor/github.com/redis/rueidis/internal/cmds/gen_json.go @@ -618,6 +618,45 @@ func (c JsonGetSpace) Cache() Cacheable { return Cacheable(c) } +type JsonMerge Completed + +func (b Builder) JsonMerge() (c JsonMerge) { + c = JsonMerge{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.MERGE") + return c +} + +func (c JsonMerge) Key(key string) JsonMergeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonMergeKey)(c) +} + +type JsonMergeKey Completed + +func (c JsonMergeKey) Path(path string) JsonMergePath { + c.cs.s = append(c.cs.s, path) + return (JsonMergePath)(c) +} + +type JsonMergePath Completed + +func (c JsonMergePath) Value(value string) JsonMergeValue { + c.cs.s = append(c.cs.s, value) + return (JsonMergeValue)(c) +} + +type JsonMergeValue Completed + +func (c JsonMergeValue) Build() Completed { + c.cs.Build() + return Completed(c) +} + type JsonMget Completed func (b Builder) JsonMget() (c JsonMget) { @@ -675,6 +714,55 @@ func (c JsonMgetPath) Cache() Cacheable { return Cacheable(c) } +type JsonMset Completed + +func (b Builder) JsonMset() (c JsonMset) { + c = JsonMset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.MSET") + return c +} + +func (c JsonMset) Key(key string) JsonMsetTripletKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonMsetTripletKey)(c) +} + +type JsonMsetTripletKey Completed + +func (c JsonMsetTripletKey) Path(path string) JsonMsetTripletPath { + c.cs.s = 
append(c.cs.s, path) + return (JsonMsetTripletPath)(c) +} + +type JsonMsetTripletPath Completed + +func (c JsonMsetTripletPath) Value(value string) JsonMsetTripletValue { + c.cs.s = append(c.cs.s, value) + return (JsonMsetTripletValue)(c) +} + +type JsonMsetTripletValue Completed + +func (c JsonMsetTripletValue) Key(key string) JsonMsetTripletKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonMsetTripletKey)(c) +} + +func (c JsonMsetTripletValue) Build() Completed { + c.cs.Build() + return Completed(c) +} + type JsonNumincrby Completed func (b Builder) JsonNumincrby() (c JsonNumincrby) { diff --git a/vendor/github.com/redis/rueidis/internal/cmds/gen_sentinel.go b/vendor/github.com/redis/rueidis/internal/cmds/gen_sentinel.go index b66384109a1..471237bc0ab 100644 --- a/vendor/github.com/redis/rueidis/internal/cmds/gen_sentinel.go +++ b/vendor/github.com/redis/rueidis/internal/cmds/gen_sentinel.go @@ -42,6 +42,26 @@ func (c SentinelGetMasterAddrByNameMaster) Build() Completed { return Completed(c) } +type SentinelReplicas Completed + +func (b Builder) SentinelReplicas() (c SentinelReplicas) { + c = SentinelReplicas{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SENTINEL", "REPLICAS") + return c +} + +func (c SentinelReplicas) Master(master string) SentinelReplicasMaster { + c.cs.s = append(c.cs.s, master) + return (SentinelReplicasMaster)(c) +} + +type SentinelReplicasMaster Completed + +func (c SentinelReplicasMaster) Build() Completed { + c.cs.Build() + return Completed(c) +} + type SentinelSentinels Completed func (b Builder) SentinelSentinels() (c SentinelSentinels) { diff --git a/vendor/github.com/redis/rueidis/internal/cmds/gen_string.go b/vendor/github.com/redis/rueidis/internal/cmds/gen_string.go index 630eae4a26a..d28a2029142 100644 --- a/vendor/github.com/redis/rueidis/internal/cmds/gen_string.go +++ b/vendor/github.com/redis/rueidis/internal/cmds/gen_string.go @@ -2,7 +2,10 @@ package cmds -import "strconv" +import ( + "strconv" + "time" +) type Append Completed @@ -166,6 +169,13 @@ func (c Getex) Key(key string) GetexKey { return (GetexKey)(c) } +type GetexExpirationExSecTyped Completed + +func (c GetexExpirationExSecTyped) Build() Completed { + c.cs.Build() + return Completed(c) +} + type GetexExpirationExSeconds Completed func (c GetexExpirationExSeconds) Build() Completed { @@ -180,6 +190,13 @@ func (c GetexExpirationExatTimestamp) Build() Completed { return Completed(c) } +type GetexExpirationExatTimestampTyped Completed + +func (c GetexExpirationExatTimestampTyped) Build() Completed { + c.cs.Build() + return Completed(c) +} + type GetexExpirationPersist Completed func (c GetexExpirationPersist) Build() Completed { @@ -194,6 +211,13 @@ func (c GetexExpirationPxMilliseconds) Build() Completed { return Completed(c) } +type GetexExpirationPxMsTyped Completed + +func (c GetexExpirationPxMsTyped) Build() Completed { + c.cs.Build() + return Completed(c) +} + type GetexExpirationPxatMillisecondsTimestamp Completed func (c GetexExpirationPxatMillisecondsTimestamp) Build() Completed { @@ -201,6 +225,13 @@ func (c GetexExpirationPxatMillisecondsTimestamp) Build() Completed { return Completed(c) } +type GetexExpirationPxatMsTimestampTyped Completed + +func (c GetexExpirationPxatMsTimestampTyped) Build() Completed { + c.cs.Build() + return Completed(c) +} + type GetexKey Completed func (c GetexKey) ExSeconds(seconds int64) GetexExpirationExSeconds { @@ -228,6 +259,26 @@ func (c 
GetexKey) Persist() GetexExpirationPersist { return (GetexExpirationPersist)(c) } +func (c GetexKey) Ex(duration time.Duration) GetexExpirationExSecTyped { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(int64(duration/time.Second), 10)) + return (GetexExpirationExSecTyped)(c) +} + +func (c GetexKey) Px(duration time.Duration) GetexExpirationPxMsTyped { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(int64(duration/time.Millisecond), 10)) + return (GetexExpirationPxMsTyped)(c) +} + +func (c GetexKey) Exat(timestamp time.Time) GetexExpirationExatTimestampTyped { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp.Unix(), 10)) + return (GetexExpirationExatTimestampTyped)(c) +} + +func (c GetexKey) Pxat(timestamp time.Time) GetexExpirationPxatMsTimestampTyped { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(timestamp.UnixMilli(), 10)) + return (GetexExpirationPxatMsTimestampTyped)(c) +} + func (c GetexKey) Build() Completed { c.cs.Build() return Completed(c) @@ -710,6 +761,26 @@ func (c SetConditionNx) Keepttl() SetExpirationKeepttl { return (SetExpirationKeepttl)(c) } +func (c SetConditionNx) Ex(duration time.Duration) SetExpirationExSecTyped { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(int64(duration/time.Second), 10)) + return (SetExpirationExSecTyped)(c) +} + +func (c SetConditionNx) Px(duration time.Duration) SetExpirationPxMsTyped { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(int64(duration/time.Millisecond), 10)) + return (SetExpirationPxMsTyped)(c) +} + +func (c SetConditionNx) Exat(timestamp time.Time) SetExpirationExatTimestampTyped { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp.Unix(), 10)) + return (SetExpirationExatTimestampTyped)(c) +} + +func (c SetConditionNx) Pxat(timestamp time.Time) SetExpirationPxatMsTimestampTyped { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(timestamp.UnixMilli(), 10)) + return (SetExpirationPxatMsTimestampTyped)(c) +} + func (c SetConditionNx) Build() Completed { c.cs.Build() return Completed(c) @@ -747,11 +818,38 @@ func (c SetConditionXx) Keepttl() SetExpirationKeepttl { return (SetExpirationKeepttl)(c) } +func (c SetConditionXx) Ex(duration time.Duration) SetExpirationExSecTyped { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(int64(duration/time.Second), 10)) + return (SetExpirationExSecTyped)(c) +} + +func (c SetConditionXx) Px(duration time.Duration) SetExpirationPxMsTyped { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(int64(duration/time.Millisecond), 10)) + return (SetExpirationPxMsTyped)(c) +} + +func (c SetConditionXx) Exat(timestamp time.Time) SetExpirationExatTimestampTyped { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp.Unix(), 10)) + return (SetExpirationExatTimestampTyped)(c) +} + +func (c SetConditionXx) Pxat(timestamp time.Time) SetExpirationPxatMsTimestampTyped { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(timestamp.UnixMilli(), 10)) + return (SetExpirationPxatMsTimestampTyped)(c) +} + func (c SetConditionXx) Build() Completed { c.cs.Build() return Completed(c) } +type SetExpirationExSecTyped Completed + +func (c SetExpirationExSecTyped) Build() Completed { + c.cs.Build() + return Completed(c) +} + type SetExpirationExSeconds Completed func (c SetExpirationExSeconds) Build() Completed { @@ -766,6 +864,13 @@ func (c SetExpirationExatTimestamp) Build() Completed { return Completed(c) } +type SetExpirationExatTimestampTyped Completed + +func (c SetExpirationExatTimestampTyped) Build() Completed { + c.cs.Build() + return Completed(c) +} + type 
SetExpirationKeepttl Completed func (c SetExpirationKeepttl) Build() Completed { @@ -780,6 +885,13 @@ func (c SetExpirationPxMilliseconds) Build() Completed { return Completed(c) } +type SetExpirationPxMsTyped Completed + +func (c SetExpirationPxMsTyped) Build() Completed { + c.cs.Build() + return Completed(c) +} + type SetExpirationPxatMillisecondsTimestamp Completed func (c SetExpirationPxatMillisecondsTimestamp) Build() Completed { @@ -787,6 +899,13 @@ func (c SetExpirationPxatMillisecondsTimestamp) Build() Completed { return Completed(c) } +type SetExpirationPxatMsTimestampTyped Completed + +func (c SetExpirationPxatMsTimestampTyped) Build() Completed { + c.cs.Build() + return Completed(c) +} + type SetGet Completed func (c SetGet) ExSeconds(seconds int64) SetExpirationExSeconds { @@ -814,6 +933,26 @@ func (c SetGet) Keepttl() SetExpirationKeepttl { return (SetExpirationKeepttl)(c) } +func (c SetGet) Ex(duration time.Duration) SetExpirationExSecTyped { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(int64(duration/time.Second), 10)) + return (SetExpirationExSecTyped)(c) +} + +func (c SetGet) Px(duration time.Duration) SetExpirationPxMsTyped { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(int64(duration/time.Millisecond), 10)) + return (SetExpirationPxMsTyped)(c) +} + +func (c SetGet) Exat(timestamp time.Time) SetExpirationExatTimestampTyped { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp.Unix(), 10)) + return (SetExpirationExatTimestampTyped)(c) +} + +func (c SetGet) Pxat(timestamp time.Time) SetExpirationPxatMsTimestampTyped { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(timestamp.UnixMilli(), 10)) + return (SetExpirationPxatMsTimestampTyped)(c) +} + func (c SetGet) Build() Completed { c.cs.Build() return Completed(c) @@ -868,6 +1007,26 @@ func (c SetValue) Keepttl() SetExpirationKeepttl { return (SetExpirationKeepttl)(c) } +func (c SetValue) Ex(duration time.Duration) SetExpirationExSecTyped { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(int64(duration/time.Second), 10)) + return (SetExpirationExSecTyped)(c) +} + +func (c SetValue) Px(duration time.Duration) SetExpirationPxMsTyped { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(int64(duration/time.Millisecond), 10)) + return (SetExpirationPxMsTyped)(c) +} + +func (c SetValue) Exat(timestamp time.Time) SetExpirationExatTimestampTyped { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp.Unix(), 10)) + return (SetExpirationExatTimestampTyped)(c) +} + +func (c SetValue) Pxat(timestamp time.Time) SetExpirationPxatMsTimestampTyped { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(timestamp.UnixMilli(), 10)) + return (SetExpirationPxatMsTimestampTyped)(c) +} + func (c SetValue) Build() Completed { c.cs.Build() return Completed(c) diff --git a/vendor/github.com/redis/rueidis/internal/util/parallel.go b/vendor/github.com/redis/rueidis/internal/util/parallel.go index 9ad06107245..e5c4b237bcf 100644 --- a/vendor/github.com/redis/rueidis/internal/util/parallel.go +++ b/vendor/github.com/redis/rueidis/internal/util/parallel.go @@ -1,41 +1,40 @@ package util import ( - "runtime" "sync" ) -func ParallelKeys[K comparable, V any](p map[K]V, fn func(k K)) { +func ParallelKeys[K comparable, V any](maxp int, p map[K]V, fn func(k K)) { ch := make(chan K, len(p)) for k := range p { ch <- k } - closeThenParallel(ch, fn) + closeThenParallel(maxp, ch, fn) } -func ParallelVals[K comparable, V any](p map[K]V, fn func(k V)) { +func ParallelVals[K comparable, V any](maxp int, p map[K]V, fn func(k V)) { ch := 
make(chan V, len(p)) for _, v := range p { ch <- v } - closeThenParallel(ch, fn) + closeThenParallel(maxp, ch, fn) } -func closeThenParallel[V any](ch chan V, fn func(k V)) { +func closeThenParallel[V any](maxp int, ch chan V, fn func(k V)) { close(ch) concurrency := len(ch) - if cpus := runtime.NumCPU(); concurrency > cpus { - concurrency = cpus + if concurrency > maxp { + concurrency = maxp } wg := sync.WaitGroup{} wg.Add(concurrency) for i := 1; i < concurrency; i++ { - go func() { + go func(wg *sync.WaitGroup) { for v := range ch { fn(v) } wg.Done() - }() + }(&wg) } for v := range ch { fn(v) diff --git a/vendor/github.com/redis/rueidis/internal/util/pool.go b/vendor/github.com/redis/rueidis/internal/util/pool.go new file mode 100644 index 00000000000..fd251fb266f --- /dev/null +++ b/vendor/github.com/redis/rueidis/internal/util/pool.go @@ -0,0 +1,40 @@ +package util + +import ( + "sync" + "sync/atomic" +) + +type Container interface { + Capacity() int + ResetLen(n int) +} + +func NewPool[T Container](fn func(capacity int) T) *Pool[T] { + p := &Pool[T]{fn: fn} + p.sp.New = func() any { + return fn(int(atomic.LoadUint32(&p.ca))) + } + return p +} + +type Pool[T Container] struct { + sp sync.Pool + fn func(capacity int) T + ca uint32 +} + +func (p *Pool[T]) Get(length, capacity int) T { + atomic.StoreUint32(&p.ca, uint32(capacity)) + s := p.sp.Get().(T) + if s.Capacity() < capacity { + p.sp.Put(s) + s = p.fn(capacity) + } + s.ResetLen(length) + return s +} + +func (p *Pool[T]) Put(s T) { + p.sp.Put(s) +} diff --git a/vendor/github.com/redis/rueidis/lru.go b/vendor/github.com/redis/rueidis/lru.go index 97b5714dfa7..ceb410b6c35 100644 --- a/vendor/github.com/redis/rueidis/lru.go +++ b/vendor/github.com/redis/rueidis/lru.go @@ -7,6 +7,8 @@ import ( "sync/atomic" "time" "unsafe" + + "github.com/redis/rueidis/internal/cmds" ) const ( @@ -128,6 +130,96 @@ ret: return v, ce } +func (c *lru) Flights(now time.Time, multi []CacheableTTL, results []RedisResult, entries map[int]CacheEntry) (missed []int) { + var moves []*list.Element + + c.mu.RLock() + for i, ct := range multi { + key, cmd := cmds.CacheKey(ct.Cmd) + if kc, ok := c.store[key]; ok { + if ele, ok := kc.cache[cmd]; ok { + e := ele.Value.(*cacheEntry) + v := e.val + if v.typ == 0 { + entries[i] = e + } else if v.relativePTTL(now) > 0 { + results[i] = newResult(v, nil) + } else { + goto miss1 + } + if atomic.AddUint64(&kc.hits, 1)&moveThreshold == 0 { + if moves == nil { + moves = make([]*list.Element, 0, len(multi)) + } + moves = append(moves, ele) + } + continue + } + } + miss1: + if missed == nil { + missed = make([]int, 0, len(multi)) + } + missed = append(missed, i) + } + c.mu.RUnlock() + + if len(moves) > 0 { + c.mu.Lock() + if c.list != nil { + for _, ele := range moves { + c.list.MoveToBack(ele) + } + } + c.mu.Unlock() + } + + if len(missed) == 0 { + return missed + } + + j := 0 + c.mu.Lock() + if c.store == nil { + c.mu.Unlock() + return missed + } + for _, i := range missed { + key, cmd := cmds.CacheKey(multi[i].Cmd) + kc, ok := c.store[key] + if !ok { + kc = &keyCache{cache: make(map[string]*list.Element, 1), key: key} + c.store[key] = kc + } + if ele, ok := kc.cache[cmd]; ok { + e := ele.Value.(*cacheEntry) + v := e.val + if v.typ == 0 { + entries[i] = e + } else if v.relativePTTL(now) > 0 { + results[i] = newResult(v, nil) + } else { + c.list.Remove(ele) + c.size -= e.size + goto miss2 + } + atomic.AddUint64(&kc.hits, 1) + c.list.MoveToBack(ele) + continue + } + miss2: + atomic.AddUint64(&kc.miss, 1) + v := RedisMessage{} + 
v.setExpireAt(now.Add(multi[i].TTL).UnixMilli()) + c.list.PushBack(&cacheEntry{cmd: cmd, kc: kc, val: v, ch: make(chan struct{})}) + kc.cache[cmd] = c.list.Back() + missed[j] = i + j++ + } + c.mu.Unlock() + return missed[:j] +} + func (c *lru) Update(key, cmd string, value RedisMessage) (pxat int64) { var ch chan struct{} c.mu.Lock() diff --git a/vendor/github.com/redis/rueidis/lua.go b/vendor/github.com/redis/rueidis/lua.go index 973b4a8ddb1..2b2b02a607f 100644 --- a/vendor/github.com/redis/rueidis/lua.go +++ b/vendor/github.com/redis/rueidis/lua.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha1" "encoding/hex" + "runtime" "sync/atomic" "github.com/redis/rueidis/internal/util" @@ -12,7 +13,7 @@ import ( // NewLuaScript creates a Lua instance whose Lua.Exec uses EVALSHA and EVAL. func NewLuaScript(script string) *Lua { sum := sha1.Sum([]byte(script)) - return &Lua{script: script, sha1: hex.EncodeToString(sum[:])} + return &Lua{script: script, sha1: hex.EncodeToString(sum[:]), maxp: runtime.GOMAXPROCS(0)} } // NewLuaScriptReadOnly creates a Lua instance whose Lua.Exec uses EVALSHA_RO and EVAL_RO. @@ -26,6 +27,7 @@ func NewLuaScriptReadOnly(script string) *Lua { type Lua struct { script string sha1 string + maxp int readonly bool } @@ -38,7 +40,7 @@ func (s *Lua) Exec(ctx context.Context, c Client, keys, args []string) (resp Red } else { resp = c.Do(ctx, c.B().Evalsha().Sha1(s.sha1).Numkeys(int64(len(keys))).Key(keys...).Arg(args...).Build()) } - if err := resp.RedisError(); err != nil && err.IsNoScript() { + if err, ok := IsRedisErr(resp.Error()); ok && err.IsNoScript() { if s.readonly { resp = c.Do(ctx, c.B().EvalRo().Script(s.script).Numkeys(int64(len(keys))).Key(keys...).Arg(args...).Build()) } else { @@ -59,7 +61,7 @@ type LuaExec struct { // Cross slot keys within single LuaExec are prohibited if the Client is a cluster client. func (s *Lua) ExecMulti(ctx context.Context, c Client, multi ...LuaExec) (resp []RedisResult) { var e atomic.Value - util.ParallelVals(c.Nodes(), func(n Client) { + util.ParallelVals(s.maxp, c.Nodes(), func(n Client) { if err := n.Do(ctx, n.B().ScriptLoad().Script(s.script).Build()).Error(); err != nil { e.CompareAndSwap(nil, &errs{error: err}) } diff --git a/vendor/github.com/redis/rueidis/message.go b/vendor/github.com/redis/rueidis/message.go index 11d96ac5e74..23fccc5c85a 100644 --- a/vendor/github.com/redis/rueidis/message.go +++ b/vendor/github.com/redis/rueidis/message.go @@ -15,14 +15,20 @@ import ( const messageStructSize = int(unsafe.Sizeof(RedisMessage{})) // Nil represents a Redis Nil message -var Nil = &RedisError{typ: '_'} +var Nil = &RedisError{typ: typeNull} -// IsRedisNil is a handy method to check if error is redis nil response. +// IsRedisNil is a handy method to check if error is a redis nil response. // All redis nil response returns as an error. func IsRedisNil(err error) bool { return err == Nil } +// IsRedisErr is a handy method to check if error is a redis ERR response. +func IsRedisErr(err error) (ret *RedisError, ok bool) { + ret, ok = err.(*RedisError) + return ret, ok && ret != Nil +} + // RedisError is an error response or a nil message from redis instance type RedisError RedisMessage @@ -35,7 +41,7 @@ func (r *RedisError) Error() string { // IsNil checks if it is a redis nil message. func (r *RedisError) IsNil() bool { - return r.typ == '_' + return r.typ == typeNull } // IsMoved checks if it is a redis MOVED message and returns moved address. 
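Editor's note, not part of the diff: the following hunk removes RedisResult.RedisError(), so callers now unwrap server-side errors with the new rueidis.IsRedisErr helper added above. A minimal migration sketch in Go, assuming a connected rueidis.Client named client and an illustrative key "user:1":

package example

import (
	"context"
	"fmt"

	"github.com/redis/rueidis"
)

func fetch(ctx context.Context, client rueidis.Client) (string, error) {
	resp := client.Do(ctx, client.B().Get().Key("user:1").Build())
	if err := resp.Error(); err != nil {
		if rueidis.IsRedisNil(err) {
			return "", nil // nil replies stay on IsRedisNil; IsRedisErr deliberately excludes them
		}
		if rerr, ok := rueidis.IsRedisErr(err); ok {
			return "", fmt.Errorf("redis replied with an error: %s", rerr.Error())
		}
		return "", err // non-redis failure, e.g. a network timeout
	}
	return resp.ToString()
}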
@@ -84,14 +90,6 @@ type RedisResult struct { val RedisMessage } -// RedisError can be used to check if the redis response is an error message. -func (r RedisResult) RedisError() *RedisError { - if err := r.val.Error(); err != nil { - return err.(*RedisError) - } - return nil -} - // NonRedisError can be used to check if there is an underlying error (ex. network timeout). func (r RedisResult) NonRedisError() error { return r.err @@ -334,6 +332,15 @@ func (r RedisResult) AsFtSearch() (total int64, docs []FtSearchDoc, err error) { return } +func (r RedisResult) AsGeosearch() (locations []GeoLocation, err error) { + if r.err != nil { + err = r.err + } else { + locations, err = r.val.AsGeosearch() + } + return +} + // AsMap delegates to RedisMessage.AsMap func (r RedisResult) AsMap() (v map[string]RedisMessage, err error) { if r.err != nil { @@ -426,45 +433,45 @@ type RedisMessage struct { // IsNil check if message is a redis nil response func (m *RedisMessage) IsNil() bool { - return m.typ == '_' + return m.typ == typeNull } // IsInt64 check if message is a redis RESP3 int response func (m *RedisMessage) IsInt64() bool { - return m.typ == ':' + return m.typ == typeInteger } // IsFloat64 check if message is a redis RESP3 double response func (m *RedisMessage) IsFloat64() bool { - return m.typ == ',' + return m.typ == typeFloat } // IsString check if message is a redis string response func (m *RedisMessage) IsString() bool { - return m.typ == '$' || m.typ == '+' + return m.typ == typeBlobString || m.typ == typeSimpleString } // IsBool check if message is a redis RESP3 bool response func (m *RedisMessage) IsBool() bool { - return m.typ == '#' + return m.typ == typeBool } // IsArray check if message is a redis array response func (m *RedisMessage) IsArray() bool { - return m.typ == '*' || m.typ == '~' + return m.typ == typeArray || m.typ == typeSet } // IsMap check if message is a redis RESP3 map response func (m *RedisMessage) IsMap() bool { - return m.typ == '%' + return m.typ == typeMap } // Error check if message is a redis error response, including nil response func (m *RedisMessage) Error() error { - if m.typ == '_' { + if m.typ == typeNull { return Nil } - if m.typ == '-' || m.typ == '!' 
{ + if m.typ == typeSimpleErr || m.typ == typeBlobErr { // kvrocks: https://github.com/redis/rueidis/issues/152#issuecomment-1333923750 mm := *m mm.string = strings.TrimPrefix(m.string, "ERR ") @@ -480,7 +487,7 @@ func (m *RedisMessage) ToString() (val string, err error) { } if m.IsInt64() || m.values != nil { typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a string", typ)) + panic(fmt.Sprintf("redis message type %s is not a string", typeNames[typ])) } return m.string, m.Error() } @@ -534,18 +541,18 @@ func (m *RedisMessage) AsBool() (val bool, err error) { return } switch m.typ { - case '$', '+': + case typeBlobString, typeSimpleString: val = m.string == "OK" return - case ':': + case typeInteger: val = m.integer != 0 return - case '#': + case typeBool: val = m.integer == 1 return default: typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a int, string or bool", typ)) + panic(fmt.Sprintf("redis message type %s is not a int, string or bool", typeNames[typ])) } } @@ -570,7 +577,7 @@ func (m *RedisMessage) ToInt64() (val int64, err error) { return 0, err } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a RESP3 int64", typ)) + panic(fmt.Sprintf("redis message type %s is not a RESP3 int64", typeNames[typ])) } // ToBool check if message is a redis RESP3 bool response, and return it @@ -582,7 +589,7 @@ func (m *RedisMessage) ToBool() (val bool, err error) { return false, err } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a RESP3 bool", typ)) + panic(fmt.Sprintf("redis message type %s is not a RESP3 bool", typeNames[typ])) } // ToFloat64 check if message is a redis RESP3 double response, and return it @@ -594,7 +601,7 @@ func (m *RedisMessage) ToFloat64() (val float64, err error) { return 0, err } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a RESP3 float64", typ)) + panic(fmt.Sprintf("redis message type %s is not a RESP3 float64", typeNames[typ])) } // ToArray check if message is a redis array/set response, and return it @@ -606,7 +613,7 @@ func (m *RedisMessage) ToArray() ([]RedisMessage, error) { return nil, err } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a array", typ)) + panic(fmt.Sprintf("redis message type %s is not a array", typeNames[typ])) } // AsStrSlice check if message is a redis array/set response, and convert to []string. 
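Editor's note, not part of the diff: the gen_string.go hunks earlier in this patch add time-typed expiration builders (Ex, Px, Exat, Pxat) beside the existing ExSeconds/PxMilliseconds forms for SET and GETEX. A short sketch of the new chain, assuming a connected rueidis.Client named client; key names and TTLs are illustrative:

package example

import (
	"context"
	"time"

	"github.com/redis/rueidis"
)

func cacheSession(ctx context.Context, client rueidis.Client) error {
	// Previously: .ExSeconds(30); the typed variant takes a time.Duration and emits the same "EX 30".
	set := client.B().Set().Key("session:42").Value("payload").Ex(30 * time.Second).Build()
	if err := client.Do(ctx, set).Error(); err != nil {
		return err
	}
	// GETEX gains the same helpers, e.g. an absolute deadline via Exat(time.Time).
	getex := client.B().Getex().Key("session:42").Exat(time.Now().Add(time.Minute)).Build()
	return client.Do(ctx, getex).Error()
}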
@@ -735,7 +742,7 @@ func (m *RedisMessage) AsXRead() (ret map[string][]XRangeEntry, err error) { return ret, nil } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a map/array/set or its length is not even", typ)) + panic(fmt.Sprintf("redis message type %s is not a map/array/set or its length is not even", typeNames[typ])) } // ZScore is the element type of ZRANGE WITHSCORES, ZDIFF WITHSCORES and ZPOPMAX command response @@ -807,7 +814,7 @@ func (m *RedisMessage) AsScanEntry() (e ScanEntry, err error) { return e, err } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a scan response or its length is not at least 2", typ)) + panic(fmt.Sprintf("redis message type %s is not a scan response or its length is not at least 2", typeNames[typ])) } // AsMap check if message is a redis array/set response, and convert to map[string]RedisMessage @@ -819,7 +826,7 @@ func (m *RedisMessage) AsMap() (map[string]RedisMessage, error) { return toMap(m.values), nil } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a map/array/set or its length is not even", typ)) + panic(fmt.Sprintf("redis message type %s is not a map/array/set or its length is not even", typeNames[typ])) } // AsStrMap check if message is a redis map/array/set response, and convert to map[string]string. @@ -838,7 +845,7 @@ func (m *RedisMessage) AsStrMap() (map[string]string, error) { return r, nil } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a map/array/set or its length is not even", typ)) + panic(fmt.Sprintf("redis message type %s is not a map/array/set or its length is not even", typeNames[typ])) } // AsIntMap check if message is a redis map/array/set response, and convert to map[string]int64. @@ -853,12 +860,12 @@ func (m *RedisMessage) AsIntMap() (map[string]int64, error) { for i := 0; i < len(m.values); i += 2 { k := m.values[i] v := m.values[i+1] - if k.typ == '$' || k.typ == '+' { + if k.typ == typeBlobString || k.typ == typeSimpleString { if len(v.string) != 0 { if r[k.string], err = strconv.ParseInt(v.string, 0, 64); err != nil { return nil, err } - } else if v.typ == ':' || v.typ == '_' { + } else if v.typ == typeInteger || v.typ == typeNull { r[k.string] = v.integer } } @@ -866,7 +873,7 @@ func (m *RedisMessage) AsIntMap() (map[string]int64, error) { return r, nil } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a map/array/set or its length is not even", typ)) + panic(fmt.Sprintf("redis message type %s is not a map/array/set or its length is not even", typeNames[typ])) } type KeyValues struct { @@ -884,7 +891,7 @@ func (m *RedisMessage) AsLMPop() (kvs KeyValues, err error) { return } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a LMPOP response", typ)) + panic(fmt.Sprintf("redis message type %s is not a LMPOP response", typeNames[typ])) } type KeyZScores struct { @@ -902,7 +909,7 @@ func (m *RedisMessage) AsZMPop() (kvs KeyZScores, err error) { return } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a ZMPOP response", typ)) + panic(fmt.Sprintf("redis message type %s is not a ZMPOP response", typeNames[typ])) } type FtSearchDoc struct { @@ -931,7 +938,58 @@ func (m *RedisMessage) AsFtSearch() (total int64, docs []FtSearchDoc, err error) return } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a FT.SEARCH response", typ)) + panic(fmt.Sprintf("redis message type %s is not a FT.SEARCH response", typeNames[typ])) +} + +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 
+} + +func (m *RedisMessage) AsGeosearch() ([]GeoLocation, error) { + arr, err := m.ToArray() + if err != nil { + return nil, err + } + geoLocations := make([]GeoLocation, 0, len(arr)) + for _, v := range arr { + var loc GeoLocation + if v.IsString() { + loc.Name = v.string + } else { + info := v.values + var i int + + //name + loc.Name = info[i].string + i++ + //distance + if i < len(info) && info[i].string != "" { + loc.Dist, err = util.ToFloat64(info[i].string) + if err != nil { + return nil, err + } + i++ + } + //hash + if i < len(info) && info[i].IsInt64() { + loc.GeoHash = info[i].integer + i++ + } + //coordinates + if i < len(info) && info[i].values != nil { + cord := info[i].values + if len(cord) < 2 { + return nil, fmt.Errorf("got %d, expected 2", len(info)) + } + loc.Longitude, _ = cord[0].AsFloat64() + loc.Latitude, _ = cord[1].AsFloat64() + } + } + geoLocations = append(geoLocations, loc) + } + return geoLocations, nil } // ToMap check if message is a redis RESP3 map response, and return it @@ -943,7 +1001,7 @@ func (m *RedisMessage) ToMap() (map[string]RedisMessage, error) { return nil, err } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a RESP3 map", typ)) + panic(fmt.Sprintf("redis message type %s is not a RESP3 map", typeNames[typ])) } // ToAny turns message into go any value @@ -952,15 +1010,15 @@ func (m *RedisMessage) ToAny() (any, error) { return nil, err } switch m.typ { - case ',': + case typeFloat: return util.ToFloat64(m.string) - case '$', '+', '=', '(': + case typeBlobString, typeSimpleString, typeVerbatimString, typeBigNumber: return m.string, nil - case '#': + case typeBool: return m.integer == 1, nil - case ':': + case typeInteger: return m.integer, nil - case '%': + case typeMap: vs := make(map[string]any, len(m.values)/2) for i := 0; i < len(m.values); i += 2 { if v, err := m.values[i+1].ToAny(); err != nil && !IsRedisNil(err) { @@ -970,7 +1028,7 @@ func (m *RedisMessage) ToAny() (any, error) { } } return vs, nil - case '~', '*': + case typeSet, typeArray: vs := make([]any, len(m.values)) for i := 0; i < len(m.values); i++ { if v, err := m.values[i].ToAny(); err != nil && !IsRedisNil(err) { @@ -982,7 +1040,7 @@ func (m *RedisMessage) ToAny() (any, error) { return vs, nil } typ := m.typ - panic(fmt.Sprintf("redis message type %c is not a supported in ToAny", typ)) + panic(fmt.Sprintf("redis message type %s is not a supported in ToAny", typeNames[typ])) } // IsCacheHit check if message is from client side cache @@ -1045,12 +1103,12 @@ func (m *RedisMessage) setExpireAt(pttl int64) { func toMap(values []RedisMessage) map[string]RedisMessage { r := make(map[string]RedisMessage, len(values)/2) for i := 0; i < len(values); i += 2 { - if values[i].typ == '$' || values[i].typ == '+' { + if values[i].typ == typeBlobString || values[i].typ == typeSimpleString { r[values[i].string] = values[i+1] continue } typ := values[i].typ - panic(fmt.Sprintf("redis message type %c as map key is not supported", typ)) + panic(fmt.Sprintf("redis message type %s as map key is not supported", typeNames[typ])) } return r } diff --git a/vendor/github.com/redis/rueidis/mux.go b/vendor/github.com/redis/rueidis/mux.go index 20246489370..42e5a5865a3 100644 --- a/vendor/github.com/redis/rueidis/mux.go +++ b/vendor/github.com/redis/rueidis/mux.go @@ -3,6 +3,7 @@ package rueidis import ( "context" "net" + "runtime" "sync" "sync/atomic" "time" @@ -21,11 +22,32 @@ type singleconnect struct { g sync.WaitGroup } +type batchcache struct { + cIndexes []int + commands []CacheableTTL +} 
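// Editor's note, not part of the diff: batchcache keeps a per-slot batch of
// cacheable commands (commands) together with each command's original position
// in the DoMultiCache call (cIndexes), so per-slot responses can be scattered
// back into the caller's result slice. Instances are recycled through the
// generic util.NewPool added in internal/util/pool.go above, via the
// batchcachep pool defined below.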
+ +func (r *batchcache) Capacity() int { + return cap(r.commands) +} + +func (r *batchcache) ResetLen(n int) { + r.cIndexes = r.cIndexes[:n] + r.commands = r.commands[:n] +} + +var batchcachep = util.NewPool(func(capacity int) *batchcache { + return &batchcache{ + cIndexes: make([]int, 0, capacity), + commands: make([]CacheableTTL, 0, capacity), + } +}) + type conn interface { Do(ctx context.Context, cmd Completed) RedisResult DoCache(ctx context.Context, cmd Cacheable, ttl time.Duration) RedisResult - DoMulti(ctx context.Context, multi ...Completed) []RedisResult - DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisResult + DoMulti(ctx context.Context, multi ...Completed) *redisresults + DoMultiCache(ctx context.Context, multi ...CacheableTTL) *redisresults Receive(ctx context.Context, subscribe Completed, fn func(message PubSubMessage)) error Info() map[string]RedisMessage Error() error @@ -48,6 +70,7 @@ type mux struct { wire []atomic.Value sc []*singleconnect mu []sync.Mutex + maxp int } func makeMux(dst string, option *ClientOption, dialFn dialFn) *mux { @@ -75,6 +98,7 @@ func newMux(dst string, option *ClientOption, init, dead wire, wireFn wireFn) *m wire: make([]atomic.Value, multiplex), mu: make([]sync.Mutex, multiplex), sc: make([]*singleconnect, multiplex), + maxp: runtime.GOMAXPROCS(0), } for i := 0; i < len(m.wire); i++ { m.wire[i].Store(init) @@ -163,7 +187,7 @@ func (m *mux) Do(ctx context.Context, cmd Completed) (resp RedisResult) { return resp } -func (m *mux) DoMulti(ctx context.Context, multi ...Completed) (resp []RedisResult) { +func (m *mux) DoMulti(ctx context.Context, multi ...Completed) (resp *redisresults) { for _, cmd := range multi { if cmd.IsBlock() { goto block @@ -185,10 +209,10 @@ func (m *mux) blocking(ctx context.Context, cmd Completed) (resp RedisResult) { return resp } -func (m *mux) blockingMulti(ctx context.Context, cmd []Completed) (resp []RedisResult) { +func (m *mux) blockingMulti(ctx context.Context, cmd []Completed) (resp *redisresults) { wire := m.pool.Acquire() resp = wire.DoMulti(ctx, cmd...) - for _, res := range resp { + for _, res := range resp.s { if res.NonRedisError() != nil { // abort the wire if blocking command return early (ex. context.DeadlineExceeded) wire.Close() break @@ -207,11 +231,11 @@ func (m *mux) pipeline(ctx context.Context, cmd Completed) (resp RedisResult) { return resp } -func (m *mux) pipelineMulti(ctx context.Context, cmd []Completed) (resp []RedisResult) { +func (m *mux) pipelineMulti(ctx context.Context, cmd []Completed) (resp *redisresults) { slot := cmd[0].Slot() & uint16(len(m.wire)-1) wire := m.pipe(slot) resp = wire.DoMulti(ctx, cmd...) 
- for _, r := range resp { + for _, r := range resp.s { if isBroken(r.NonRedisError(), wire) { m.wire[slot].CompareAndSwap(wire, m.init) return resp @@ -230,7 +254,7 @@ func (m *mux) DoCache(ctx context.Context, cmd Cacheable, ttl time.Duration) Red return resp } -func (m *mux) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (results []RedisResult) { +func (m *mux) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (results *redisresults) { var slots map[uint16]int var mask = uint16(len(m.wire) - 1) @@ -247,32 +271,37 @@ func (m *mux) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (results return m.doMultiCache(ctx, multi[0].Cmd.Slot()&mask, multi) } - commands := make(map[uint16][]CacheableTTL, len(slots)) - cIndexes := make(map[uint16][]int, len(slots)) + batches := make(map[uint16]*batchcache, len(m.wire)) for slot, count := range slots { - cIndexes[slot] = make([]int, 0, count) - commands[slot] = make([]CacheableTTL, 0, count) + batches[slot] = batchcachep.Get(0, count) } for i, cmd := range multi { - slot := cmd.Cmd.Slot() & mask - commands[slot] = append(commands[slot], cmd) - cIndexes[slot] = append(cIndexes[slot], i) + batch := batches[cmd.Cmd.Slot()&mask] + batch.commands = append(batch.commands, cmd) + batch.cIndexes = append(batch.cIndexes, i) } - results = make([]RedisResult, len(multi)) - util.ParallelKeys(commands, func(slot uint16) { - for i, r := range m.doMultiCache(ctx, slot, commands[slot]) { - results[cIndexes[slot][i]] = r + results = resultsp.Get(len(multi), len(multi)) + util.ParallelKeys(m.maxp, batches, func(slot uint16) { + batch := batches[slot] + resp := m.doMultiCache(ctx, slot, batch.commands) + for i, r := range resp.s { + results.s[batch.cIndexes[i]] = r } + resultsp.Put(resp) }) + for _, batch := range batches { + batchcachep.Put(batch) + } + return results } -func (m *mux) doMultiCache(ctx context.Context, slot uint16, multi []CacheableTTL) (resps []RedisResult) { +func (m *mux) doMultiCache(ctx context.Context, slot uint16, multi []CacheableTTL) (resps *redisresults) { wire := m.pipe(slot) resps = wire.DoMultiCache(ctx, multi...) 
- for _, r := range resps { + for _, r := range resps.s { if isBroken(r.NonRedisError(), wire) { m.wire[slot].CompareAndSwap(wire, m.init) return resps diff --git a/vendor/github.com/redis/rueidis/pipe.go b/vendor/github.com/redis/rueidis/pipe.go index 1ecb5d59704..0890d0e1b30 100644 --- a/vendor/github.com/redis/rueidis/pipe.go +++ b/vendor/github.com/redis/rueidis/pipe.go @@ -16,6 +16,7 @@ import ( "time" "github.com/redis/rueidis/internal/cmds" + "github.com/redis/rueidis/internal/util" ) var noHello = regexp.MustCompile("unknown command .?HELLO.?") @@ -23,8 +24,8 @@ var noHello = regexp.MustCompile("unknown command .?HELLO.?") type wire interface { Do(ctx context.Context, cmd Completed) RedisResult DoCache(ctx context.Context, cmd Cacheable, ttl time.Duration) RedisResult - DoMulti(ctx context.Context, multi ...Completed) []RedisResult - DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisResult + DoMulti(ctx context.Context, multi ...Completed) *redisresults + DoMultiCache(ctx context.Context, multi ...CacheableTTL) *redisresults Receive(ctx context.Context, subscribe Completed, fn func(message PubSubMessage)) error Info() map[string]RedisMessage Error() error @@ -35,6 +36,44 @@ type wire interface { SetOnCloseHook(fn func(error)) } +type redisresults struct { + s []RedisResult +} + +func (r *redisresults) Capacity() int { + return cap(r.s) +} + +func (r *redisresults) ResetLen(n int) { + r.s = r.s[:n] + for i := 0; i < n; i++ { + r.s[i] = RedisResult{} + } +} + +var resultsp = util.NewPool(func(capacity int) *redisresults { + return &redisresults{s: make([]RedisResult, 0, capacity)} +}) + +type cacheentries struct { + e map[int]CacheEntry + c int +} + +func (c *cacheentries) Capacity() int { + return c.c +} + +func (c *cacheentries) ResetLen(n int) { + for k := range c.e { + delete(c.e, k) + } +} + +var entriesp = util.NewPool(func(capacity int) *cacheentries { + return &cacheentries{e: make(map[int]CacheEntry, capacity), c: capacity} +}) + var _ wire = (*pipe)(nil) type pipe struct { @@ -125,12 +164,23 @@ func _newPipe(connFn func() (net.Conn, error), option *ClientOption, r2ps bool) } else { init = append(init, helloCmd, append([]string{"CLIENT", "TRACKING", "ON"}, option.ClientTrackingOptions...)) } + if option.ClientNoEvict { + init = append(init, []string{"CLIENT", "NO-EVICT", "ON"}) + } + if option.ClientSetInfo != nil { + clientSetInfoCmd := []string{"CLIENT", "SETINFO"} + clientSetInfoCmd = append(clientSetInfoCmd, option.ClientSetInfo...) + init = append(init, clientSetInfoCmd) + } if option.DisableCache { init = init[:1] } if option.SelectDB != 0 { init = append(init, []string{"SELECT", strconv.Itoa(option.SelectDB)}) } + if option.ClientNoTouch { + init = append(init, []string{"CLIENT", "NO-TOUCH", "ON"}) + } timeout := option.Dialer.Timeout if timeout <= 0 { @@ -142,7 +192,9 @@ func _newPipe(connFn func() (net.Conn, error), option *ClientOption, r2ps bool) r2 := option.AlwaysRESP2 if !r2 && !r2ps { - for i, r := range p.DoMulti(ctx, cmds.NewMultiCompleted(init)...) { + resp := p.DoMulti(ctx, cmds.NewMultiCompleted(init)...) 
+ defer resultsp.Put(resp) + for i, r := range resp.s { if i == 0 { p.info, err = r.AsMap() } else { @@ -189,11 +241,25 @@ func _newPipe(connFn func() (net.Conn, error), option *ClientOption, r2ps bool) if option.ClientName != "" { init = append(init, []string{"CLIENT", "SETNAME", option.ClientName}) } + if option.ClientNoEvict { + init = append(init, []string{"CLIENT", "NO-EVICT", "ON"}) + } + if option.ClientSetInfo != nil { + clientSetInfoCmd := []string{"CLIENT", "SETINFO"} + clientSetInfoCmd = append(clientSetInfoCmd, option.ClientSetInfo...) + init = append(init, clientSetInfoCmd) + } if option.SelectDB != 0 { init = append(init, []string{"SELECT", strconv.Itoa(option.SelectDB)}) } + if option.ClientNoTouch { + init = append(init, []string{"CLIENT", "NO-TOUCH", "ON"}) + } + if len(init) != 0 { - for _, r := range p.DoMulti(ctx, cmds.NewMultiCompleted(init)...) { + resp := p.DoMulti(ctx, cmds.NewMultiCompleted(init)...) + defer resultsp.Put(resp) + for _, r := range resp.s { if err = r.Error(); err != nil { p.Close() return nil, err @@ -394,34 +460,6 @@ func (p *pipe) _backgroundRead() (err error) { } } } - // if unfulfilled multi commands are lead by opt-in and get success response - if ff == len(multi)-1 && multi[0].IsOptIn() && len(msg.values) >= 2 { - now := time.Now() - if cacheable := Cacheable(multi[len(multi)-2]); cacheable.IsMGet() { - cc := cmds.MGetCacheCmd(cacheable) - msgs := msg.values[len(msg.values)-1].values - for i, cp := range msgs { - ck := cmds.MGetCacheKey(cacheable, i) - cp.attrs = cacheMark - if pttl := msg.values[i].integer; pttl >= 0 { - cp.setExpireAt(now.Add(time.Duration(pttl) * time.Millisecond).UnixMilli()) - } - msgs[i].setExpireAt(p.cache.Update(ck, cc, cp)) - } - } else { - msgs := msg.values - for i := 1; i < len(msgs); i += 2 { - cacheable = Cacheable(multi[i+2]) - ck, cc := cmds.CacheKey(cacheable) - cp := msg.values[i] - cp.attrs = cacheMark - if pttl := msg.values[i-1].integer; pttl >= 0 { - cp.setExpireAt(now.Add(time.Duration(pttl) * time.Millisecond).UnixMilli()) - } - msgs[i].setExpireAt(p.cache.Update(ck, cc, cp)) - } - } - } if ff == len(multi) { ff = 0 ones[0], multi, ch, cond = p.queue.NextResultCh() // ch should not be nil, otherwise it must be a protocol bug @@ -441,6 +479,29 @@ func (p *pipe) _backgroundRead() (err error) { if multi == nil { multi = ones } + } else if ff >= 4 && len(msg.values) >= 2 && multi[0].IsOptIn() { // if unfulfilled multi commands are lead by opt-in and get success response + now := time.Now() + if cacheable := Cacheable(multi[ff-1]); cacheable.IsMGet() { + cc := cmds.MGetCacheCmd(cacheable) + msgs := msg.values[len(msg.values)-1].values + for i, cp := range msgs { + ck := cmds.MGetCacheKey(cacheable, i) + cp.attrs = cacheMark + if pttl := msg.values[i].integer; pttl >= 0 { + cp.setExpireAt(now.Add(time.Duration(pttl) * time.Millisecond).UnixMilli()) + } + msgs[i].setExpireAt(p.cache.Update(ck, cc, cp)) + } + } else { + ck, cc := cmds.CacheKey(cacheable) + ci := len(msg.values) - 1 + cp := msg.values[ci] + cp.attrs = cacheMark + if pttl := msg.values[ci-1].integer; pttl >= 0 { + cp.setExpireAt(now.Add(time.Duration(pttl) * time.Millisecond).UnixMilli()) + } + msg.values[ci].setExpireAt(p.cache.Update(ck, cc, cp)) + } } if prply { // Redis will send sunsubscribe notification proactively in the event of slot migration. 
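Editor's note, not part of the diff: the pipe.go handshake above now honors three new ClientOption fields (declared further down in rueidis.go) by sending extra CLIENT commands when the connection is established. A sketch of enabling them, with illustrative address and CLIENT SETINFO attributes:

package example

import "github.com/redis/rueidis"

func newClient() (rueidis.Client, error) {
	return rueidis.NewClient(rueidis.ClientOption{
		InitAddress: []string{"127.0.0.1:6379"},
		// Appended to CLIENT SETINFO during the handshake; the attribute/value pair is an assumption here.
		ClientSetInfo: []string{"LIB-NAME", "myapp"},
		// Sends CLIENT NO-EVICT ON so this connection is excluded from client eviction.
		ClientNoEvict: true,
		// Sends CLIENT NO-TOUCH ON so reads do not alter LRU/LFU stats.
		ClientNoTouch: true,
	})
}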
@@ -459,6 +520,8 @@ func (p *pipe) _backgroundRead() (err error) { } skip = len(multi[ff].Commands()) - 2 msg = RedisMessage{} // override successful subscribe/unsubscribe response to empty + } else if multi[ff].NoReply() && msg.string == "QUEUED" { + panic(multiexecsub) } ch <- newResult(msg, err) if ff++; ff == len(multi) { @@ -762,11 +825,11 @@ queue: return resp } -func (p *pipe) DoMulti(ctx context.Context, multi ...Completed) []RedisResult { - resp := make([]RedisResult, len(multi)) +func (p *pipe) DoMulti(ctx context.Context, multi ...Completed) *redisresults { + resp := resultsp.Get(len(multi), len(multi)) if err := ctx.Err(); err != nil { - for i := 0; i < len(resp); i++ { - resp[i] = newErrResult(err) + for i := 0; i < len(resp.s); i++ { + resp.s[i] = newErrResult(err) } return resp } @@ -785,8 +848,8 @@ func (p *pipe) DoMulti(ctx context.Context, multi ...Completed) []RedisResult { if p.version < 6 && noReply != 0 { if noReply != len(multi) { - for i := 0; i < len(resp); i++ { - resp[i] = newErrResult(ErrRESP2PubSubMixed) + for i := 0; i < len(resp.s); i++ { + resp.s[i] = newErrResult(ErrRESP2PubSubMixed) } return resp } else if p.r2psFn != nil { @@ -804,7 +867,7 @@ func (p *pipe) DoMulti(ctx context.Context, multi ...Completed) []RedisResult { if isBlock { atomic.AddInt32(&p.blcksig, 1) defer func() { - for _, r := range resp { + for _, r := range resp.s { if r.err != nil { return } @@ -833,11 +896,11 @@ func (p *pipe) DoMulti(ctx context.Context, multi ...Completed) []RedisResult { p.background() goto queue } - resp = p.syncDoMulti(dl, ok, resp, multi) + resp.s = p.syncDoMulti(dl, ok, resp.s, multi) } else { err := newErrResult(p.Error()) - for i := 0; i < len(resp); i++ { - resp[i] = err + for i := 0; i < len(resp.s); i++ { + resp.s[i] = err } } if left := atomic.AddInt32(&p.waits, -1); state == 0 && waits == 1 && left != 0 { @@ -850,13 +913,13 @@ queue: ch := p.queue.PutMulti(multi) var i int if ctxCh := ctx.Done(); ctxCh == nil { - for ; i < len(resp); i++ { - resp[i] = <-ch + for ; i < len(resp.s); i++ { + resp.s[i] = <-ch } } else { - for ; i < len(resp); i++ { + for ; i < len(resp.s); i++ { select { - case resp[i] = <-ch: + case resp.s[i] = <-ch: case <-ctxCh: goto abort } @@ -867,15 +930,15 @@ queue: return resp abort: go func(i int) { - for ; i < len(resp); i++ { + for ; i < len(resp.s); i++ { <-ch } atomic.AddInt32(&p.waits, -1) atomic.AddInt32(&p.recvs, 1) }(i) err := newErrResult(ctx.Err()) - for ; i < len(resp); i++ { - resp[i] = err + for ; i < len(resp.s); i++ { + resp.s[i] = err } return resp } @@ -986,7 +1049,8 @@ func (p *pipe) DoCache(ctx context.Context, cmd Cacheable, ttl time.Duration) Re Completed(cmd), cmds.ExecCmd, ) - exec, err := resp[4].ToArray() + defer resultsp.Put(resp) + exec, err := resp.s[4].ToArray() if err != nil { if _, ok := err.(*RedisError); ok { err = ErrDoCacheAborted @@ -999,14 +1063,15 @@ func (p *pipe) DoCache(ctx context.Context, cmd Cacheable, ttl time.Duration) Re func (p *pipe) doCacheMGet(ctx context.Context, cmd Cacheable, ttl time.Duration) RedisResult { commands := cmd.Commands() - entries := make(map[int]CacheEntry) + keys := len(commands) - 1 builder := cmds.NewBuilder(cmds.InitSlot) result := RedisResult{val: RedisMessage{typ: '*', values: nil}} mgetcc := cmds.MGetCacheCmd(cmd) - keys := len(commands) - 1 if mgetcc[0] == 'J' { keys-- // the last one of JSON.MGET is a path, not a key } + entries := entriesp.Get(keys, keys) + defer entriesp.Put(entries) var now = time.Now() var rewrite cmds.Arbitrary for i, key := range 
commands[1 : keys+1] { @@ -1019,7 +1084,7 @@ func (p *pipe) doCacheMGet(ctx context.Context, cmd Cacheable, ttl time.Duration continue } if entry != nil { - entries[i] = entry // store entries for later entry.Wait() to avoid MGET deadlock each others. + entries.e[i] = entry // store entries for later entry.Wait() to avoid MGET deadlock each others. continue } if rewrite.IsZero() { @@ -1048,7 +1113,8 @@ func (p *pipe) doCacheMGet(ctx context.Context, cmd Cacheable, ttl time.Duration multi = append(multi, rewritten, cmds.ExecCmd) resp := p.DoMulti(ctx, multi...) - exec, err := resp[len(multi)-1].ToArray() + defer resultsp.Put(resp) + exec, err := resp.s[len(multi)-1].ToArray() if err != nil { if _, ok := err.(*RedisError); ok { err = ErrDoCacheAborted @@ -1075,7 +1141,7 @@ func (p *pipe) doCacheMGet(ctx context.Context, cmd Cacheable, ttl time.Duration if len(result.val.values) == 0 { result.val.values = make([]RedisMessage, keys) } - for i, entry := range entries { + for i, entry := range entries.e { v, err := entry.Wait(ctx) if err != nil { return newErrResult(err) @@ -1095,7 +1161,7 @@ func (p *pipe) doCacheMGet(ctx context.Context, cmd Cacheable, ttl time.Duration return result } -func (p *pipe) DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisResult { +func (p *pipe) DoMultiCache(ctx context.Context, multi ...CacheableTTL) *redisresults { if p.cache == nil { commands := make([]Completed, len(multi)) for i, ct := range multi { @@ -1106,58 +1172,75 @@ func (p *pipe) DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisR cmds.CacheableCS(multi[0].Cmd).Verify() - results := make([]RedisResult, len(multi)) - entries := make(map[int]CacheEntry) - missing := []Completed{cmds.OptInCmd, cmds.MultiCmd} + results := resultsp.Get(len(multi), len(multi)) + entries := entriesp.Get(len(multi), len(multi)) + defer entriesp.Put(entries) + var missing []Completed now := time.Now() - for i, ct := range multi { + for _, ct := range multi { if ct.Cmd.IsMGet() { panic(panicmgetcsc) } - ck, cc := cmds.CacheKey(ct.Cmd) - v, entry := p.cache.Flight(ck, cc, ct.TTL, now) - if v.typ != 0 { // cache hit for one key - results[i] = newResult(v, nil) - continue + } + if cache, ok := p.cache.(*lru); ok { + missed := cache.Flights(now, multi, results.s, entries.e) + for _, i := range missed { + ct := multi[i] + ck, _ := cmds.CacheKey(ct.Cmd) + missing = append(missing, cmds.OptInCmd, cmds.MultiCmd, cmds.NewCompleted([]string{"PTTL", ck}), Completed(ct.Cmd), cmds.ExecCmd) } - if entry != nil { - entries[i] = entry // store entries for later entry.Wait() to avoid MGET deadlock each others. - continue + } else { + for i, ct := range multi { + ck, cc := cmds.CacheKey(ct.Cmd) + v, entry := p.cache.Flight(ck, cc, ct.TTL, now) + if v.typ != 0 { // cache hit for one key + results.s[i] = newResult(v, nil) + continue + } + if entry != nil { + entries.e[i] = entry // store entries for later entry.Wait() to avoid MGET deadlock each others. + continue + } + missing = append(missing, cmds.OptInCmd, cmds.MultiCmd, cmds.NewCompleted([]string{"PTTL", ck}), Completed(ct.Cmd), cmds.ExecCmd) } - missing = append(missing, cmds.NewCompleted([]string{"PTTL", ck}), Completed(ct.Cmd)) } - var exec []RedisMessage - var err error - if len(missing) > 2 { - missing = append(missing, cmds.ExecCmd) - resp := p.DoMulti(ctx, missing...) 
- exec, err = resp[len(missing)-1].ToArray() - if err != nil { - if _, ok := err.(*RedisError); ok { - err = ErrDoCacheAborted - } - for i := 3; i < len(missing); i += 2 { - cacheable := Cacheable(missing[i]) - ck, cc := cmds.CacheKey(cacheable) + var resp *redisresults + if len(missing) > 0 { + resp = p.DoMulti(ctx, missing...) + defer resultsp.Put(resp) + for i := 4; i < len(resp.s); i += 5 { + if err := resp.s[i].Error(); err != nil { + if _, ok := err.(*RedisError); ok { + err = ErrDoCacheAborted + } + ck, cc := cmds.CacheKey(Cacheable(missing[i-1])) p.cache.Cancel(ck, cc, err) } - for i := range results { - results[i] = newErrResult(err) - } - return results } } - for i, entry := range entries { - results[i] = newResult(entry.Wait(ctx)) + for i, entry := range entries.e { + results.s[i] = newResult(entry.Wait(ctx)) + } + + if len(missing) == 0 { + return results } j := 0 - for i := 1; i < len(exec); i += 2 { - for ; j < len(results); j++ { - if results[j].val.typ == 0 && results[j].err == nil { - results[j] = newResult(exec[i], nil) + for i := 4; i < len(resp.s); i += 5 { + for ; j < len(results.s); j++ { + if results.s[j].val.typ == 0 && results.s[j].err == nil { + exec, err := resp.s[i].ToArray() + if err != nil { + if _, ok := err.(*RedisError); ok { + err = ErrDoCacheAborted + } + results.s[j] = newErrResult(err) + } else { + results.s[j] = newResult(exec[len(exec)-1], nil) + } break } } @@ -1232,6 +1315,7 @@ func epipeFn(err error) *pipe { const ( protocolbug = "protocol bug, message handled out of order" wrongreceive = "only SUBSCRIBE, SSUBSCRIBE, or PSUBSCRIBE command are allowed in Receive" + multiexecsub = "SUBSCRIBE/UNSUBSCRIBE are not allowed in MULTI/EXEC block" panicmgetcsc = "MGET and JSON.MGET in DoMultiCache are not implemented, use DoCache instead" ) diff --git a/vendor/github.com/redis/rueidis/resp.go b/vendor/github.com/redis/rueidis/resp.go index 914cf9d3b03..91cd9482172 100644 --- a/vendor/github.com/redis/rueidis/resp.go +++ b/vendor/github.com/redis/rueidis/resp.go @@ -11,27 +11,65 @@ import ( var errChunked = errors.New("unbounded redis message") var errOldNull = errors.New("RESP2 null") +const ( + typeBlobString = byte('$') + typeSimpleString = byte('+') + typeSimpleErr = byte('-') + typeInteger = byte(':') + typeNull = byte('_') + typeEnd = byte('.') + typeFloat = byte(',') + typeBool = byte('#') + typeBlobErr = byte('!') + typeVerbatimString = byte('=') + typeBigNumber = byte('(') + typeArray = byte('*') + typeMap = byte('%') + typeSet = byte('~') + typeAttribute = byte('|') + typePush = byte('>') +) + +var typeNames = make(map[byte]string, 16) + type reader func(i *bufio.Reader) (RedisMessage, error) var readers = [256]reader{} func init() { - readers['$'] = readBlobString - readers['+'] = readSimpleString - readers['-'] = readSimpleString - readers[':'] = readInteger - readers['_'] = readNull - readers[','] = readSimpleString - readers['#'] = readBoolean - readers['!'] = readBlobString - readers['='] = readBlobString - readers['('] = readSimpleString - readers['*'] = readArray - readers['%'] = readMap - readers['~'] = readArray - readers['|'] = readMap - readers['>'] = readArray - readers['.'] = readNull + readers[typeBlobString] = readBlobString + readers[typeSimpleString] = readSimpleString + readers[typeSimpleErr] = readSimpleString + readers[typeInteger] = readInteger + readers[typeNull] = readNull + readers[typeFloat] = readSimpleString + readers[typeBool] = readBoolean + readers[typeBlobErr] = readBlobString + readers[typeVerbatimString] = 
readBlobString + readers[typeBigNumber] = readSimpleString + readers[typeArray] = readArray + readers[typeMap] = readMap + readers[typeSet] = readArray + readers[typeAttribute] = readMap + readers[typePush] = readArray + readers[typeEnd] = readNull + + typeNames[typeBlobString] = "blob string" + typeNames[typeSimpleString] = "simple string" + typeNames[typeSimpleErr] = "simple error" + typeNames[typeInteger] = "int64" + typeNames[typeNull] = "null" + typeNames[typeFloat] = "float64" + typeNames[typeBool] = "boolean" + typeNames[typeBlobErr] = "blob error" + typeNames[typeVerbatimString] = "verbatim string" + typeNames[typeBigNumber] = "big number" + typeNames[typeArray] = "array" + typeNames[typeMap] = "map" + typeNames[typeSet] = "set" + typeNames[typeAttribute] = "attribute" + typeNames[typePush] = "push" + typeNames[typeEnd] = "null" } func readSimpleString(i *bufio.Reader) (m RedisMessage, err error) { @@ -223,12 +261,12 @@ func readNextMessage(i *bufio.Reader) (m RedisMessage, err error) { } if m, err = fn(i); err != nil { if err == errOldNull { - return RedisMessage{typ: '_'}, nil + return RedisMessage{typ: typeNull}, nil } return RedisMessage{}, err } m.typ = typ - if m.typ == '|' { // handle the attributes + if m.typ == typeAttribute { // handle the attributes a := m // clone the original m first, and then take address of the clone attrs = &a // to avoid go compiler allocating the m on heap which causing worse performance. m = RedisMessage{} diff --git a/vendor/github.com/redis/rueidis/rueidis.go b/vendor/github.com/redis/rueidis/rueidis.go index 4d45bca34d2..e5c29f057b5 100644 --- a/vendor/github.com/redis/rueidis/rueidis.go +++ b/vendor/github.com/redis/rueidis/rueidis.go @@ -74,10 +74,14 @@ type ClientOption struct { Password string ClientName string + // ClientSetInfo will assign various info attributes to the current connection + ClientSetInfo []string + // InitAddress point to redis nodes. // Rueidis will connect to them one by one and issue CLUSTER SLOT command to initialize the cluster client until success. // If len(InitAddress) == 1 and the address is not running in cluster mode, rueidis will fall back to the single client mode. // If ClientOption.Sentinel.MasterSet is set, then InitAddress will be used to connect sentinels + // You can bypass this behaviour by using ClientOption.ForceSingleClient. InitAddress []string // ClientTrackingOptions will be appended to CLIENT TRACKING ON command when the connection is established. @@ -107,7 +111,7 @@ type ClientOption struct { // PipelineMultiplex determines how many tcp connections used to pipeline commands to one redis instance. // The default for single and sentinel clients is 2, which means 4 connections (2^2). - // The default for cluster client is 0, which means 1 connection (2^0). + // For cluster client, PipelineMultiplex doesn't have any effect. 
PipelineMultiplex int // ConnWriteTimeout is applied net.Conn.SetWriteDeadline and periodic PING to redis @@ -127,6 +131,8 @@ type ClientOption struct { // ShuffleInit is a handy flag that shuffles the InitAddress after passing to the NewClient() if it is true ShuffleInit bool + // ClientNoTouch controls whether commands alter LRU/LFU stats + ClientNoTouch bool // DisableRetry disables retrying read-only commands under network errors DisableRetry bool // DisableCache falls back Client.DoCache/Client.DoMultiCache to Client.Do/Client.DoMulti @@ -135,6 +141,19 @@ type ClientOption struct { AlwaysPipelining bool // AlwaysRESP2 makes rueidis.Client always uses RESP2, otherwise it will try using RESP3 first. AlwaysRESP2 bool + // ForceSingleClient force the usage of a single client connection, without letting the lib guessing + // if redis instance is a cluster or a single redis instance. + ForceSingleClient bool + + // ReplicaOnly indicates that this client will only try to connect to readonly replicas of redis setup. + // currently, it is only implemented for sentinel client + ReplicaOnly bool + + // ClientNoEvict sets the client eviction mode for the current connection. + // When turned on and client eviction is configured, + // the current connection will be excluded from the client eviction process + // even if we're above the configured client eviction threshold. + ClientNoEvict bool } // SentinelOption contains MasterSet, @@ -279,9 +298,16 @@ func NewClient(option ClientOption) (client Client, err error) { option.PipelineMultiplex = singleClientMultiplex(option.PipelineMultiplex) return newSentinelClient(&option, makeConn) } + pmbk := option.PipelineMultiplex + option.PipelineMultiplex = 0 // PipelineMultiplex is meaningless for cluster client + + if option.ForceSingleClient { + option.PipelineMultiplex = singleClientMultiplex(pmbk) + return newSingleClient(&option, nil, makeConn) + } if client, err = newClusterClient(&option, makeConn); err != nil { if len(option.InitAddress) == 1 && (err.Error() == redisErrMsgCommandNotAllow || strings.Contains(strings.ToUpper(err.Error()), "CLUSTER")) { - option.PipelineMultiplex = singleClientMultiplex(option.PipelineMultiplex) + option.PipelineMultiplex = singleClientMultiplex(pmbk) client, err = newSingleClient(&option, client.(*clusterClient).single(), makeConn) } else if client != (*clusterClient)(nil) { client.Close() diff --git a/vendor/github.com/redis/rueidis/sentinel.go b/vendor/github.com/redis/rueidis/sentinel.go index 8f8a5cf4d2b..e46ec01ab50 100644 --- a/vendor/github.com/redis/rueidis/sentinel.go +++ b/vendor/github.com/redis/rueidis/sentinel.go @@ -5,11 +5,14 @@ import ( "context" "errors" "fmt" + "net" "strings" "sync" "sync/atomic" "time" + "math/rand" + "github.com/redis/rueidis/internal/cmds" ) @@ -21,6 +24,7 @@ func newSentinelClient(opt *ClientOption, connFn connFn) (client *sentinelClient connFn: connFn, sentinels: list.New(), retry: !opt.DisableRetry, + replica: opt.ReplicaOnly, } for _, sentinel := range opt.InitAddress { @@ -49,6 +53,7 @@ type sentinelClient struct { stop uint32 cmd cmds.Builder retry bool + replica bool } func (c *sentinelClient) B() cmds.Builder { @@ -67,25 +72,26 @@ retry: return resp } -func (c *sentinelClient) DoMulti(ctx context.Context, multi ...Completed) (resps []RedisResult) { +func (c *sentinelClient) DoMulti(ctx context.Context, multi ...Completed) []RedisResult { if len(multi) == 0 { return nil } retry: - resps = c.mConn.Load().(conn).DoMulti(ctx, multi...) 
+ resps := c.mConn.Load().(conn).DoMulti(ctx, multi...) if c.retry && allReadOnly(multi) { - for _, resp := range resps { + for _, resp := range resps.s { if c.isRetryable(resp.NonRedisError(), ctx) { + resultsp.Put(resps) goto retry } } } for i, cmd := range multi { - if resps[i].NonRedisError() == nil { + if resps.s[i].NonRedisError() == nil { cmds.PutCompleted(cmd) } } - return resps + return resps.s } func (c *sentinelClient) DoCache(ctx context.Context, cmd Cacheable, ttl time.Duration) (resp RedisResult) { @@ -100,25 +106,26 @@ retry: return resp } -func (c *sentinelClient) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (resps []RedisResult) { +func (c *sentinelClient) DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisResult { if len(multi) == 0 { return nil } retry: - resps = c.mConn.Load().(conn).DoMultiCache(ctx, multi...) + resps := c.mConn.Load().(conn).DoMultiCache(ctx, multi...) if c.retry { - for _, resp := range resps { + for _, resp := range resps.s { if c.isRetryable(resp.NonRedisError(), ctx) { + resultsp.Put(resps) goto retry } } } for i, cmd := range multi { - if err := resps[i].NonRedisError(); err == nil || err == ErrDoCacheAborted { + if err := resps.s[i].NonRedisError(); err == nil || err == ErrDoCacheAborted { cmds.PutCacheable(cmd.Cmd) } } - return resps + return resps.s } func (c *sentinelClient) Receive(ctx context.Context, subscribe Completed, fn func(msg PubSubMessage)) (err error) { @@ -187,42 +194,50 @@ func (c *sentinelClient) _addSentinel(addr string) { c.sentinels.PushFront(addr) } -func (c *sentinelClient) switchMasterRetry(addr string) { +func (c *sentinelClient) switchTargetRetry(addr string) { c.mu.Lock() - err := c._switchMaster(addr) + err := c._switchTarget(addr) c.mu.Unlock() if err != nil { go c.refreshRetry() } } -func (c *sentinelClient) _switchMaster(addr string) (err error) { - var master conn +func (c *sentinelClient) _switchTarget(addr string) (err error) { + var target conn if atomic.LoadUint32(&c.stop) == 1 { return nil } if c.mAddr == addr { - master = c.mConn.Load().(conn) - if master.Error() != nil { - master = nil + target = c.mConn.Load().(conn) + if target.Error() != nil { + target = nil } } - if master == nil { - master = c.connFn(addr, c.mOpt) - if err = master.Dial(); err != nil { + if target == nil { + target = c.connFn(addr, c.mOpt) + if err = target.Dial(); err != nil { return err } } - if resp, err := master.Do(context.Background(), cmds.RoleCmd).ToArray(); err != nil { - master.Close() + + resp, err := target.Do(context.Background(), cmds.RoleCmd).ToArray() + if err != nil { + target.Close() return err - } else if resp[0].string != "master" { - master.Close() + } + + if c.replica && resp[0].string != "slave" { + target.Close() + return errNotSlave + } else if !c.replica && resp[0].string != "master" { + target.Close() return errNotMaster } + c.mAddr = addr - if old := c.mConn.Swap(master); old != nil { - if prev := old.(conn); prev != master { + if old := c.mConn.Swap(target); old != nil { + if prev := old.(conn); prev != target { prev.Close() } } @@ -241,7 +256,7 @@ func (c *sentinelClient) refresh() (err error) { } func (c *sentinelClient) _refresh() (err error) { - var master string + var target string var sentinels []string c.mu.Lock() @@ -262,11 +277,15 @@ func (c *sentinelClient) _refresh() (err error) { err = c.sConn.Dial() } if err == nil { - if master, sentinels, err = c.listWatch(c.sConn); err == nil { + // listWatch returns server address with sentinels. 
+ // check if target is master or replica + if target, sentinels, err = c.listWatch(c.sConn); err == nil { for _, sentinel := range sentinels { c._addSentinel(sentinel) } - if err = c._switchMaster(master); err == nil { + + // _switchTarget will switch the connection for master OR replica + if err = c._switchTarget(target); err == nil { break } } @@ -289,32 +308,49 @@ func (c *sentinelClient) _refresh() (err error) { return err } -func (c *sentinelClient) listWatch(cc conn) (master string, sentinels []string, err error) { +// listWatch will use sentinel to list current master|replica address along with sentinels address +func (c *sentinelClient) listWatch(cc conn) (target string, sentinels []string, err error) { ctx := context.Background() sentinelsCMD := c.cmd.SentinelSentinels().Master(c.mOpt.Sentinel.MasterSet).Build() getMasterCMD := c.cmd.SentinelGetMasterAddrByName().Master(c.mOpt.Sentinel.MasterSet).Build() + replicasCMD := c.cmd.SentinelReplicas().Master(c.mOpt.Sentinel.MasterSet).Build() + defer func() { if err == nil { // not recycle cmds if error, since cmds may be used later in pipe. consider recycle them by pipe cmds.PutCompleted(sentinelsCMD) cmds.PutCompleted(getMasterCMD) + cmds.PutCompleted(replicasCMD) } }() + // unsubscribe in case there is any previous subscription + cc.Do(ctx, cmds.SentinelUnSubscribe) + go func(cc conn) { if err := cc.Receive(ctx, cmds.SentinelSubscribe, func(event PubSubMessage) { switch event.Channel { case "+sentinel": m := strings.SplitN(event.Message, " ", 4) - c.addSentinel(fmt.Sprintf("%s:%s", m[2], m[3])) + c.addSentinel(net.JoinHostPort(m[2], m[3])) case "+switch-master": m := strings.SplitN(event.Message, " ", 5) if m[0] == c.sOpt.Sentinel.MasterSet { - c.switchMasterRetry(fmt.Sprintf("%s:%s", m[3], m[4])) + c.switchTargetRetry(net.JoinHostPort(m[3], m[4])) } case "+reboot": - m := strings.SplitN(event.Message, " ", 4) + m := strings.SplitN(event.Message, " ", 7) if m[0] == "master" && m[1] == c.sOpt.Sentinel.MasterSet { - c.switchMasterRetry(fmt.Sprintf("%s:%s", m[2], m[3])) + c.switchTargetRetry(net.JoinHostPort(m[2], m[3])) + } else if c.replica && m[0] == "slave" && m[5] == c.sOpt.Sentinel.MasterSet { + c.refreshRetry() + } + // note that in case of failover, every slave in the setup + // will send +slave event individually. + case "+slave", "+sdown", "-sdown": + m := strings.SplitN(event.Message, " ", 7) + if c.replica && m[0] == "slave" && m[5] == c.sOpt.Sentinel.MasterSet { + // call refresh to randomly choose a new slave + c.refreshRetry() } } }); err != nil && atomic.LoadUint32(&c.stop) == 0 { @@ -322,21 +358,68 @@ func (c *sentinelClient) listWatch(cc conn) (master string, sentinels []string, } }(cc) - resp := cc.DoMulti(ctx, sentinelsCMD, getMasterCMD) - others, err := resp[0].ToArray() + var commands Commands + if c.replica { + commands = Commands{sentinelsCMD, replicasCMD} + } else { + commands = Commands{sentinelsCMD, getMasterCMD} + } + + resp := cc.DoMulti(ctx, commands...) 
+ defer resultsp.Put(resp) + others, err := resp.s[0].ToArray() if err != nil { return "", nil, err } for _, other := range others { if m, err := other.AsStrMap(); err == nil { - sentinels = append(sentinels, fmt.Sprintf("%s:%s", m["ip"], m["port"])) + sentinels = append(sentinels, net.JoinHostPort(m["ip"], m["port"])) + } + } + + // we return random slave address instead of master + if c.replica { + addr, err := pickReplica(resp.s) + if err != nil { + return "", nil, err } + + return addr, sentinels, nil } - m, err := resp[1].AsStrSlice() + + // otherwise send master as address + m, err := resp.s[1].AsStrSlice() if err != nil { return "", nil, err } - return fmt.Sprintf("%s:%s", m[0], m[1]), sentinels, nil + return net.JoinHostPort(m[0], m[1]), sentinels, nil +} + +func pickReplica(resp []RedisResult) (string, error) { + replicas, err := resp[1].ToArray() + if err != nil { + return "", err + } + + eligible := make([]map[string]string, 0, len(replicas)) + // eliminate replicas with s_down condition + for i := range replicas { + replica, err := replicas[i].AsStrMap() + if err != nil { + continue + } + if _, ok := replica["s-down-time"]; !ok { + eligible = append(eligible, replica) + } + } + + if len(eligible) == 0 { + return "", fmt.Errorf("not enough ready replicas") + } + + // choose a replica randomly + m := eligible[rand.Intn(len(eligible))] + return net.JoinHostPort(m["ip"], m["port"]), nil } func newSentinelOpt(opt *ClientOption) *ClientOption { @@ -350,4 +433,7 @@ func newSentinelOpt(opt *ClientOption) *ClientOption { return &o } -var errNotMaster = errors.New("the redis is not master") +var ( + errNotMaster = errors.New("the redis role is not master") + errNotSlave = errors.New("the redis role is not slave") +) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 41625dc17d7..64add7fb51d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -295,6 +295,10 @@ func DownloadMeta(ctx context.Context, logger log.Logger, bkt objstore.Bucket, i return m, nil } +func IsBlockMetaFile(path string) bool { + return filepath.Base(path) == MetaFilename +} + func IsBlockDir(path string) (id ulid.ULID, ok bool) { id, err := ulid.Parse(filepath.Base(path)) return id, err == nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index e97fc62f8bb..d2953ae9c50 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -11,6 +11,7 @@ import ( "path" "path/filepath" "sort" + "strings" "sync" "time" @@ -234,17 +235,6 @@ func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Met cachedBlockDir = filepath.Join(f.cacheDir, id.String()) ) - // TODO(bwplotka): If that causes problems (obj store rate limits), add longer ttl to cached items. - // For 1y and 100 block sources this generates ~1.5-3k HEAD RPM. AWS handles 330k RPM per prefix. - // TODO(bwplotka): Consider filtering by consistency delay here (can't do until compactor healthyOverride work). 
- ok, err := f.bkt.Exists(ctx, metaFile) - if err != nil { - return nil, errors.Wrapf(err, "meta.json file exists: %v", metaFile) - } - if !ok { - return nil, ErrorSyncMetaNotFound - } - if m, seen := f.cached[id]; seen { return m, nil } @@ -360,14 +350,24 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) { }) } + partialBlocks := make(map[ulid.ULID]bool) // Workers scheduled, distribute blocks. eg.Go(func() error { defer close(ch) return f.bkt.Iter(ctx, "", func(name string) error { - id, ok := IsBlockDir(name) + parts := strings.Split(name, "/") + dir, file := parts[0], parts[len(parts)-1] + id, ok := IsBlockDir(dir) if !ok { return nil } + if _, ok := partialBlocks[id]; !ok { + partialBlocks[id] = true + } + if !IsBlockMetaFile(file) { + return nil + } + partialBlocks[id] = false select { case <-ctx.Done(): @@ -376,13 +376,22 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) { } return nil - }) + }, objstore.WithRecursiveIter) }) if err := eg.Wait(); err != nil { return nil, errors.Wrap(err, "BaseFetcher: iter bucket") } + mtx.Lock() + for blockULID, isPartial := range partialBlocks { + if isPartial { + resp.partial[blockULID] = errors.Errorf("block %s has no meta file", blockULID) + resp.noMetas++ + } + } + mtx.Unlock() + if len(resp.metaErrs) > 0 { return resp, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go index 0c7d062c9c3..1befe63a7f2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go @@ -250,13 +250,7 @@ type binaryWriter struct { } func newBinaryWriter(id ulid.ULID, cacheFilename string, buf []byte) (w *binaryWriter, err error) { - var memoryWriter *MemoryWriter - memoryWriter, err = NewMemoryWriter(id, len(buf)) - if err != nil { - return nil, err - } - var binWriter PosWriter = memoryWriter - + var binWriter PosWriter if cacheFilename != "" { dir := filepath.Dir(cacheFilename) @@ -277,9 +271,8 @@ func newBinaryWriter(id ulid.ULID, cacheFilename string, buf []byte) (w *binaryW return nil, errors.Wrap(err, "remove any existing index at path") } - // We use file writer for buffers not larger than reused one. var fileWriter *FileWriter - fileWriter, err = NewFileWriter(cacheFilename, memoryWriter) + fileWriter, err = NewFileWriter(cacheFilename, len(buf)) if err != nil { return nil, err } @@ -287,6 +280,8 @@ func newBinaryWriter(id ulid.ULID, cacheFilename string, buf []byte) (w *binaryW return nil, errors.Wrap(err, "sync dir") } binWriter = fileWriter + } else { + binWriter = NewMemoryWriter(id, len(buf)) } w = &binaryWriter{ @@ -304,10 +299,14 @@ func newBinaryWriter(id ulid.ULID, cacheFilename string, buf []byte) (w *binaryW return w, w.writer.Write(w.buf.Get()) } +type PosWriterWithBuffer interface { + PosWriter + Buffer() []byte +} + type PosWriter interface { Pos() uint64 Write(bufs ...[]byte) error - Buffer() []byte Flush() error Sync() error Close() error @@ -315,18 +314,16 @@ type PosWriter interface { type MemoryWriter struct { id ulid.ULID - buf bytes.Buffer + buf *bytes.Buffer pos uint64 } -// TODO(bwplotka): Added size to method, upstream this. 
-func NewMemoryWriter(id ulid.ULID, size int) (*MemoryWriter, error) { - var buf bytes.Buffer +func NewMemoryWriter(id ulid.ULID, size int) *MemoryWriter { return &MemoryWriter{ id: id, - buf: buf, + buf: bytes.NewBuffer(make([]byte, 0, size)), pos: 0, - }, nil + } } func (mw *MemoryWriter) Pos() uint64 { @@ -369,58 +366,52 @@ func (mw *MemoryWriter) Close() error { type FileWriter struct { f *os.File - memWriter *MemoryWriter fileWriter *bufio.Writer name string + pos uint64 } // TODO(bwplotka): Added size to method, upstream this. -func NewFileWriter(name string, memWriter *MemoryWriter) (*FileWriter, error) { +func NewFileWriter(name string, size int) (*FileWriter, error) { f, err := os.OpenFile(filepath.Clean(name), os.O_CREATE|os.O_RDWR, 0600) if err != nil { return nil, err } return &FileWriter{ f: f, - memWriter: memWriter, - fileWriter: bufio.NewWriterSize(f, memWriter.buf.Len()), + fileWriter: bufio.NewWriterSize(f, size), name: name, + pos: 0, }, nil } func (fw *FileWriter) Pos() uint64 { - return fw.memWriter.Pos() + return fw.pos } func (fw *FileWriter) Write(bufs ...[]byte) error { - if err := fw.memWriter.Write(bufs...); err != nil { - return err - } for _, b := range bufs { - _, err := fw.fileWriter.Write(b) + n, err := fw.fileWriter.Write(b) + fw.pos += uint64(n) if err != nil { return err } + // For now the index file must not grow beyond 64GiB. Some of the fixed-sized + // offset references in v1 are only 4 bytes large. + // Once we move to compressed/varint representations in those areas, this limitation + // can be lifted. + if fw.pos > 16*math.MaxUint32 { + return errors.Errorf("%q exceeding max size of 64GiB", fw.name) + } } return nil } -func (fw *FileWriter) Buffer() []byte { - return fw.memWriter.Buffer() -} - func (fw *FileWriter) Flush() error { - if err := fw.memWriter.Flush(); err != nil { - return err - } - return fw.fileWriter.Flush() } func (fw *FileWriter) Close() error { - if err := fw.memWriter.Close(); err != nil { - return err - } if err := fw.Flush(); err != nil { return err } @@ -431,9 +422,6 @@ func (fw *FileWriter) Close() error { } func (fw *FileWriter) Sync() error { - if err := fw.memWriter.Sync(); err != nil { - return err - } return fw.f.Sync() } @@ -476,7 +464,11 @@ func (w *binaryWriter) Write(p []byte) (int, error) { } func (w *binaryWriter) Buffer() []byte { - return w.writer.Buffer() + pwb, ok := w.writer.(PosWriterWithBuffer) + if ok { + return pwb.Buffer() + } + return nil } func (w *binaryWriter) Close() error { diff --git a/vendor/github.com/thanos-io/thanos/pkg/cache/groupcache.go b/vendor/github.com/thanos-io/thanos/pkg/cache/groupcache.go index b908b0c7a80..609959d0cb4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cache/groupcache.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cache/groupcache.go @@ -197,6 +197,26 @@ func NewGroupcacheWithConfig(logger log.Logger, reg prometheus.Registerer, conf return err } + return dest.UnmarshalBinary(encodedList, time.Now().Add(iterCfg.TTL)) + case cachekey.IterRecursiveVerb: + _, iterCfg := cfg.FindIterConfig(parsedData.Name) + if iterCfg == nil { + panic("caching bucket layer must not call on unconfigured paths") + } + + var list []string + if err := bucket.Iter(ctx, parsedData.Name, func(s string) error { + list = append(list, s) + return nil + }, objstore.WithRecursiveIter); err != nil { + return err + } + + encodedList, err := json.Marshal(list) + if err != nil { + return err + } + return dest.UnmarshalBinary(encodedList, time.Now().Add(iterCfg.TTL)) case cachekey.ContentVerb: _, 
contentCfg := cfg.FindGetConfig(parsedData.Name) diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go index a7ab81fadf3..6a701b14f82 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go @@ -31,9 +31,6 @@ var ( DialTimeout: time.Second * 5, ReadTimeout: time.Second * 3, WriteTimeout: time.Second * 3, - PoolSize: 100, - MinIdleConns: 10, - IdleTimeout: time.Minute * 5, MaxGetMultiConcurrency: 100, GetMultiBatchSize: 100, MaxSetMultiConcurrency: 100, @@ -84,22 +81,6 @@ type RedisClientConfig struct { // WriteTimeout specifies the client write timeout. WriteTimeout time.Duration `yaml:"write_timeout"` - // Maximum number of socket connections. - PoolSize int `yaml:"pool_size"` - - // MinIdleConns specifies the minimum number of idle connections which is useful when establishing - // new connection is slow. - MinIdleConns int `yaml:"min_idle_conns"` - - // Amount of time after which client closes idle connections. - // Should be less than server's timeout. - // -1 disables idle timeout check. - IdleTimeout time.Duration `yaml:"idle_timeout"` - - // Connection age at which client retires (closes) the connection. - // Default 0 is to not close aged connections. - MaxConnAge time.Duration `yaml:"max_conn_age"` - // MaxGetMultiConcurrency specifies the maximum number of concurrent GetMulti() operations. // If set to 0, concurrency is unlimited. MaxGetMultiConcurrency int `yaml:"max_get_multi_concurrency"` diff --git a/vendor/github.com/thanos-io/thanos/pkg/extkingpin/path_content_reloader.go b/vendor/github.com/thanos-io/thanos/pkg/extkingpin/path_content_reloader.go index 68c2cd252c6..e0d8fdf74ba 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/extkingpin/path_content_reloader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/extkingpin/path_content_reloader.go @@ -5,13 +5,11 @@ package extkingpin import ( "context" - "fmt" + "crypto/sha256" "os" - "path" "path/filepath" "time" - "github.com/fsnotify/fsnotify" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" @@ -22,73 +20,61 @@ type fileContent interface { Path() string } -// PathContentReloader starts a file watcher that monitors the file indicated by fileContent.Path() and runs -// reloadFunc whenever a change is detected. -// A debounce timer can be configured via opts to handle situations where many "write" events are received together or -// a "create" event is followed up by a "write" event, for example. Files will be effectively reloaded at the latest -// after 2 times the debounce timer. By default the debouncer timer is 1 second. -// To ensure renames and deletes are properly handled, the file watcher is put at the file's parent folder. See -// https://github.com/fsnotify/fsnotify/issues/214 for more details. +// PathContentReloader runs the reloadFunc when it detects that the contents of fileContent have changed. 
func PathContentReloader(ctx context.Context, fileContent fileContent, logger log.Logger, reloadFunc func(), debounceTime time.Duration) error { filePath, err := filepath.Abs(fileContent.Path()) if err != nil { return errors.Wrap(err, "getting absolute file path") } - watcher, err := fsnotify.NewWatcher() - if filePath == "" { - level.Debug(logger).Log("msg", "no path detected for config reload") + engine := &pollingEngine{ + filePath: filePath, + logger: logger, + debounce: debounceTime, + reloadFunc: reloadFunc, } - if err != nil { - return errors.Wrap(err, "creating file watcher") + return engine.start(ctx) +} + +// pollingEngine keeps rereading the contents at filePath and when its checksum changes it runs the reloadFunc. +type pollingEngine struct { + filePath string + logger log.Logger + debounce time.Duration + reloadFunc func() + previousChecksum [sha256.Size]byte +} + +func (p *pollingEngine) start(ctx context.Context) error { + configReader := func() { + // check if file still exists + if _, err := os.Stat(p.filePath); os.IsNotExist(err) { + level.Error(p.logger).Log("msg", "file does not exist", "error", err) + return + } + file, err := os.ReadFile(p.filePath) + if err != nil { + level.Error(p.logger).Log("msg", "error opening file", "error", err) + return + } + checksum := sha256.Sum256(file) + if checksum == p.previousChecksum { + return + } + p.reloadFunc() + p.previousChecksum = checksum + level.Debug(p.logger).Log("msg", "configuration reloaded", "path", p.filePath) } go func() { - var reloadTimer *time.Timer - if debounceTime != 0 { - reloadTimer = time.AfterFunc(debounceTime, func() { - reloadFunc() - level.Debug(logger).Log("msg", "configuration reloaded after debouncing") - }) - } - defer watcher.Close() for { select { case <-ctx.Done(): - if reloadTimer != nil { - reloadTimer.Stop() - } return - case event := <-watcher.Events: - // fsnotify sometimes sends a bunch of events without name or operation. - // It's unclear what they are and why they are sent - filter them out. - if event.Name == "" { - break - } - // We are watching the file's parent folder (more details on this is done can be found below), but are - // only interested in changed to the target file. Discard every other file as quickly as possible. - if event.Name != filePath { - break - } - // We only react to files being written or created. - // On chmod or remove we have nothing to do. - // On rename we have the old file name (not useful). A create event for the new file will come later. - if event.Op&fsnotify.Write == 0 && event.Op&fsnotify.Create == 0 { - break - } - level.Debug(logger).Log("msg", fmt.Sprintf("change detected for %s", filePath), "eventName", event.Name, "eventOp", event.Op) - if reloadTimer != nil { - reloadTimer.Reset(debounceTime) - } - case err := <-watcher.Errors: - level.Error(logger).Log("msg", "watcher error", "error", err) + case <-time.After(p.debounce): + configReader() } } }() - // We watch the file's parent folder and not the file itself to better handle DELETE and RENAME events. Check - // https://github.com/fsnotify/fsnotify/issues/214 for more details. 
- if err := watcher.Add(path.Dir(filePath)); err != nil { - return errors.Wrapf(err, "adding path %s to file watcher", filePath) - } return nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go b/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go index 404600f6214..14851519a50 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go +++ b/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go @@ -173,7 +173,11 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig return newRT(tlsConfig) } - return config_util.NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, cfg.TLSConfig.CertFile, cfg.TLSConfig.KeyFile, newRT) + return config_util.NewTLSRoundTripper(tlsConfig, config_util.TLSRoundTripperSettings{ + CAFile: cfg.TLSConfig.CAFile, + CertFile: cfg.TLSConfig.CertFile, + KeyFile: cfg.TLSConfig.KeyFile, + }, newRT) } // NewHTTPClient returns a new HTTP client. diff --git a/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go b/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go index a7eb98c8540..51173808e96 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go @@ -40,6 +40,16 @@ type BucketedBytes struct { new func(s int) *[]byte } +// MustNewBucketedBytes is like NewBucketedBytes but panics if construction fails. +// Useful for package internal pools. +func MustNewBucketedBytes(minSize, maxSize int, factor float64, maxTotal uint64) *BucketedBytes { + pool, err := NewBucketedBytes(minSize, maxSize, factor, maxTotal) + if err != nil { + panic(err) + } + return pool +} + // NewBucketedBytes returns a new Bytes with size buckets for minSize to maxSize // increasing by the given factor and maximum number of used bytes. // No more than maxTotal bytes can be used at any given time unless maxTotal is set to 0. diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index ac7da8e613b..d2bac75f3ee 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -271,13 +271,13 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { }) m.seriesFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ - Name: "thanos_bucket_store_cached_series_fetch_duration_seconds", + Name: "thanos_bucket_store_series_fetch_duration_seconds", Help: "The time it takes to fetch series to respond to a request sent to a store gateway. It includes both the time to fetch it from the cache and from storage in case of cache misses.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) m.postingsFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ - Name: "thanos_bucket_store_cached_postings_fetch_duration_seconds", + Name: "thanos_bucket_store_postings_fetch_duration_seconds", Help: "The time it takes to fetch postings to respond to a request sent to a store gateway. It includes both the time to fetch it from the cache and from storage in case of cache misses.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) @@ -2457,13 +2457,13 @@ func (r *bucketIndexReader) fetchExpandedPostingsFromCache(ctx context.Context, }() // If failed to decode or expand cached postings, return and expand postings again. 
if err != nil { - level.Error(r.block.logger).Log("msg", "failed to decode cached expanded postings, refetch postings", "id", r.block.meta.ULID.String()) + level.Error(r.block.logger).Log("msg", "failed to decode cached expanded postings, refetch postings", "id", r.block.meta.ULID.String(), "err", err) return false, nil, nil } ps, err := ExpandPostingsWithContext(ctx, p) if err != nil { - level.Error(r.block.logger).Log("msg", "failed to expand cached expanded postings, refetch postings", "id", r.block.meta.ULID.String()) + level.Error(r.block.logger).Log("msg", "failed to expand cached expanded postings, refetch postings", "id", r.block.meta.ULID.String(), "err", err) return false, nil, nil } @@ -2527,6 +2527,9 @@ func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Lab // If we have a miss, mark key to be fetched in `ptrs` slice. // Overlaps are well handled by partitioner, so we don't need to deduplicate keys. for ix, key := range keys { + if err := ctx.Err(); err != nil { + return nil, closeFns, err + } // Get postings for the given key from cache first. if b, ok := fromCache[key]; ok { r.stats.postingsTouched++ diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cachekey/cachekey.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cachekey/cachekey.go index eb5438be04e..0393dbdb028 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cachekey/cachekey.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cachekey/cachekey.go @@ -20,11 +20,12 @@ var ( type VerbType string const ( - ExistsVerb VerbType = "exists" - ContentVerb VerbType = "content" - IterVerb VerbType = "iter" - AttributesVerb VerbType = "attrs" - SubrangeVerb VerbType = "subrange" + ExistsVerb VerbType = "exists" + ContentVerb VerbType = "content" + IterVerb VerbType = "iter" + IterRecursiveVerb VerbType = "iter-recursive" + AttributesVerb VerbType = "attrs" + SubrangeVerb VerbType = "subrange" ) type BucketCacheKey struct { @@ -50,6 +51,7 @@ func IsValidVerb(v VerbType) bool { ExistsVerb, ContentVerb, IterVerb, + IterRecursiveVerb, AttributesVerb, SubrangeVerb: return true diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket.go index cf8a2e4cd47..796036a9a29 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket.go @@ -132,7 +132,13 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er } cb.operationRequests.WithLabelValues(objstore.OpIter, cfgName).Inc() + iterVerb := cachekey.BucketCacheKey{Verb: cachekey.IterVerb, Name: dir} + opts := objstore.ApplyIterOptions(options...) 
+ if opts.Recursive { + iterVerb.Verb = cachekey.IterRecursiveVerb + } + key := iterVerb.String() data := cfg.Cache.Fetch(ctx, []string{key}) if data[key] != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go index 2d9385bada8..b7318260470 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go @@ -7,6 +7,7 @@ import ( "bytes" "encoding/binary" "fmt" + "hash/crc32" "io" "sync" @@ -17,6 +18,7 @@ import ( "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/index" extsnappy "github.com/thanos-io/thanos/pkg/extgrpc/snappy" + "github.com/thanos-io/thanos/pkg/pool" ) // This file implements encoding and decoding of postings using diff (or delta) + varint @@ -138,41 +140,215 @@ func diffVarintSnappyStreamedDecode(input []byte, disablePooling bool) (closeabl } type streamedDiffVarintPostings struct { - cur storage.SeriesRef + curSeries storage.SeriesRef - sr io.ByteReader - err error + err error + input, buf []byte + maximumDecodedLen int + + db *encoding.Decbuf + + readSnappyIdentifier bool + disablePooling bool } -func newStreamedDiffVarintPostings(input []byte, disablePooling bool) (closeablePostings, error) { - if disablePooling { - return &streamedDiffVarintPostings{sr: s2.NewReader(bytes.NewBuffer(input))}, nil +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypeStreamIdentifier = 0xff + chunkTypePadding = 0xfe + checksumSize = 4 +) + +func maximumDecodedLenSnappyStreamed(in []byte) (int, error) { + maxDecodedLen := -1 + + for len(in) > 0 { + // Chunk type. + chunkType := in[0] + in = in[1:] + chunkLen := int(in[0]) | int(in[1])<<8 | int(in[2])<<16 + in = in[3:] + + switch chunkType { + case chunkTypeCompressedData: + bl := in[:chunkLen] + // NOTE: checksum will be checked later on. + decodedLen, err := s2.DecodedLen(bl[checksumSize:]) + if err != nil { + return 0, err + } + if decodedLen > maxDecodedLen { + maxDecodedLen = decodedLen + } + case chunkTypeUncompressedData: + // NOTE: checksum will be checked later on. + n := chunkLen - checksumSize + if n > maxDecodedLen { + maxDecodedLen = n + } + } + in = in[chunkLen:] } - r, err := extsnappy.Compressor.DecompressByteReader(bytes.NewBuffer(input)) + return maxDecodedLen, nil +} + +var decodedBufPool = pool.MustNewBucketedBytes(1024, 65536, 2, 0) + +func newStreamedDiffVarintPostings(input []byte, disablePooling bool) (closeablePostings, error) { + // We can't use the regular s2.Reader because it assumes a stream. + // We already everything in memory so let's avoid copying. + // Algorithm: + // 1. Step through all chunks all get maximum decoded len. + // 2. Read into decoded step by step. For decoding call s2.Decode(r.decoded, buf). 
+ maximumDecodedLen, err := maximumDecodedLenSnappyStreamed(input) if err != nil { - return nil, fmt.Errorf("decompressing snappy postings: %w", err) + return nil, err } - return &streamedDiffVarintPostings{sr: r}, nil + return &streamedDiffVarintPostings{ + input: input, + maximumDecodedLen: maximumDecodedLen, + db: &encoding.Decbuf{}, + disablePooling: disablePooling, + }, nil } func (it *streamedDiffVarintPostings) close() { + if it.buf == nil { + return + } + if it.disablePooling { + return + } + decodedBufPool.Put(&it.buf) } func (it *streamedDiffVarintPostings) At() storage.SeriesRef { - return it.cur + return it.curSeries } -func (it *streamedDiffVarintPostings) Next() bool { - val, err := binary.ReadUvarint(it.sr) - if err != nil { - if err != io.EOF { +func (it *streamedDiffVarintPostings) readNextChunk() bool { + if len(it.db.B) > 0 { + return true + } + // Normal EOF. + if len(it.input) == 0 { + return false + } + + // Read next chunk into it.db.B. + chunkType := it.input[0] + it.input = it.input[1:] + + if len(it.input) < 3 { + it.err = io.ErrUnexpectedEOF + return false + } + + chunkLen := int(it.input[0]) | int(it.input[1])<<8 | int(it.input[2])<<16 + it.input = it.input[3:] + + switch chunkType { + case chunkTypeStreamIdentifier: + const magicBody = "sNaPpY" + if chunkLen != len(magicBody) { + it.err = fmt.Errorf("corrupted identifier") + return false + } + if string(it.input[:6]) != magicBody { + it.err = fmt.Errorf("got bad identifier %s", string(it.input[:6])) + return false + } + it.input = it.input[6:] + it.readSnappyIdentifier = true + return it.readNextChunk() + case chunkTypeCompressedData: + if !it.readSnappyIdentifier { + it.err = fmt.Errorf("missing magic snappy marker") + return false + } + if len(it.input) < 4 { + it.err = io.ErrUnexpectedEOF + return false + } + checksum := uint32(it.input[0]) | uint32(it.input[1])<<8 | uint32(it.input[2])<<16 | uint32(it.input[3])<<24 + if len(it.input) < chunkLen { + it.err = io.ErrUnexpectedEOF + return false + } + encodedBuf := it.input[:chunkLen] + + if it.buf == nil { + if it.disablePooling { + it.buf = make([]byte, it.maximumDecodedLen) + } else { + b, err := decodedBufPool.Get(it.maximumDecodedLen) + if err != nil { + it.err = err + return false + } + it.buf = *b + } + } + + decoded, err := s2.Decode(it.buf, encodedBuf[checksumSize:]) + if err != nil { it.err = err + return false + } + if crc(decoded) != checksum { + it.err = fmt.Errorf("mismatched checksum (got %v, expected %v)", crc(decoded), checksum) + return false + } + it.db.B = decoded + case chunkTypeUncompressedData: + if !it.readSnappyIdentifier { + it.err = fmt.Errorf("missing magic snappy marker") + return false + } + if len(it.input) < 4 { + it.err = io.ErrUnexpectedEOF + return false + } + checksum := uint32(it.input[0]) | uint32(it.input[1])<<8 | uint32(it.input[2])<<16 | uint32(it.input[3])<<24 + if len(it.input) < chunkLen { + it.err = io.ErrUnexpectedEOF + return false + } + it.db.B = it.input[checksumSize:chunkLen] + if crc(it.db.B) != checksum { + it.err = fmt.Errorf("mismatched checksum (got %v, expected %v)", crc(it.db.B), checksum) + return false + } + default: + if chunkType <= 0x7f { + it.err = fmt.Errorf("unsupported chunk type %v", chunkType) + return false + } + if chunkType > 0xfd { + it.err = fmt.Errorf("invalid chunk type %v", chunkType) + return false + } + } + it.input = it.input[chunkLen:] + + return true +} + +func (it *streamedDiffVarintPostings) Next() bool { + if !it.readNextChunk() { + return false + } + val := it.db.Uvarint() + if 
it.db.Err() != nil { + if it.db.Err() != io.EOF { + it.err = it.db.Err() } return false } - it.cur = it.cur + storage.SeriesRef(val) + it.curSeries = it.curSeries + storage.SeriesRef(val) return true } @@ -181,7 +357,7 @@ func (it *streamedDiffVarintPostings) Err() error { } func (it *streamedDiffVarintPostings) Seek(x storage.SeriesRef) bool { - if it.cur >= x { + if it.curSeries >= x { return true } @@ -368,3 +544,12 @@ func snappyStreamedEncode(postingsLength int, diffVarintPostings []byte) ([]byte return compressedBuf.Bytes(), nil } + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/custom.go new file mode 100644 index 00000000000..fb3b395a9a6 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/custom.go @@ -0,0 +1,9 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package prompb + +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go index a276d9264ea..050b8e912f4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go @@ -62,71 +62,124 @@ func SamplesFromPromqlSeries(series promql.Series) ([]Sample, []Histogram) { // HistogramProtoToHistogram extracts a (normal integer) Histogram from the // provided proto message. The caller has to make sure that the proto message // represents an interger histogram and not a float histogram. -// Taken from https://github.com/prometheus/prometheus/blob/d33eb3ab17616a54b97d9f7791c791a79823f279/storage/remote/codec.go#L529-L542. +// Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L626-L645 func HistogramProtoToHistogram(hp Histogram) *histogram.Histogram { + if hp.IsFloatHistogram() { + panic("HistogramProtoToHistogram called with a float histogram") + } return &histogram.Histogram{ - Schema: hp.Schema, - ZeroThreshold: hp.ZeroThreshold, - ZeroCount: hp.GetZeroCountInt(), - Count: hp.GetCountInt(), - Sum: hp.Sum, - PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), - PositiveBuckets: hp.GetPositiveDeltas(), - NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), - NegativeBuckets: hp.GetNegativeDeltas(), + CounterResetHint: histogram.CounterResetHint(hp.ResetHint), + Schema: hp.Schema, + ZeroThreshold: hp.ZeroThreshold, + ZeroCount: hp.GetZeroCountInt(), + Count: hp.GetCountInt(), + Sum: hp.Sum, + PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), + PositiveBuckets: hp.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), + NegativeBuckets: hp.GetNegativeDeltas(), } } // FloatHistogramToHistogramProto converts a float histogram to a protobuf type. -// Taken from https://github.com/prometheus/prometheus/blob/d33eb3ab17616a54b97d9f7791c791a79823f279/storage/remote/codec.go#L587-L601. 
-func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) Histogram { - return Histogram{ - Count: &Histogram_CountFloat{CountFloat: fh.Count}, - Sum: fh.Sum, - Schema: fh.Schema, - ZeroThreshold: fh.ZeroThreshold, - ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, - NegativeSpans: spansToSpansProto(fh.NegativeSpans), - NegativeCounts: fh.NegativeBuckets, - PositiveSpans: spansToSpansProto(fh.PositiveSpans), - PositiveCounts: fh.PositiveBuckets, - ResetHint: Histogram_ResetHint(fh.CounterResetHint), - Timestamp: timestamp, +// Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L647-L667 +func FloatHistogramProtoToFloatHistogram(hp Histogram) *histogram.FloatHistogram { + if !hp.IsFloatHistogram() { + panic("FloatHistogramProtoToFloatHistogram called with an integer histogram") + } + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(hp.ResetHint), + Schema: hp.Schema, + ZeroThreshold: hp.ZeroThreshold, + ZeroCount: hp.GetZeroCountFloat(), + Count: hp.GetCountFloat(), + Sum: hp.Sum, + PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), + PositiveBuckets: hp.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), + NegativeBuckets: hp.GetNegativeCounts(), } } // HistogramProtoToFloatHistogram extracts a (normal integer) Histogram from the // provided proto message to a Float Histogram. The caller has to make sure that // the proto message represents an float histogram and not a integer histogram. -// Taken from https://github.com/prometheus/prometheus/blob/d33eb3ab17616a54b97d9f7791c791a79823f279/storage/remote/codec.go#L547-L560. +// Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L669-L688 func HistogramProtoToFloatHistogram(hp Histogram) *histogram.FloatHistogram { + if hp.IsFloatHistogram() { + panic("HistogramProtoToFloatHistogram called with a float histogram") + } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, ZeroThreshold: hp.ZeroThreshold, - ZeroCount: hp.GetZeroCountFloat(), - Count: hp.GetCountFloat(), + ZeroCount: float64(hp.GetZeroCountInt()), + Count: float64(hp.GetCountInt()), Sum: hp.Sum, PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), - PositiveBuckets: hp.GetPositiveCounts(), + PositiveBuckets: deltasToCounts(hp.GetPositiveDeltas()), NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), - NegativeBuckets: hp.GetNegativeCounts(), + NegativeBuckets: deltasToCounts(hp.GetNegativeDeltas()), } } -func spansToSpansProto(s []histogram.Span) []*BucketSpan { - spans := make([]*BucketSpan, len(s)) +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) for i := 0; i < len(s); i++ { - spans[i] = &BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} } return spans } -func spansProtoToSpans(s []*BucketSpan) []histogram.Span { - spans := make([]histogram.Span, len(s)) +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L709-L723 +func HistogramToHistogramProto(timestamp int64, h 
*histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + Timestamp: timestamp, + } +} + +// Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L725-L739 +func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) for i := 0; i < len(s); i++ { - spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } return spans diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go index f5332f345ba..42c1b0fd9ce 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go @@ -367,14 +367,14 @@ type Histogram struct { // *Histogram_ZeroCountFloat ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` // Negative Buckets. - NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"` + NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` // Use either "negative_deltas" or "negative_counts", the former for // regular histograms with integer counts, the latter for float // histograms. NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` // Positive Buckets. - PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"` + PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` // Use either "positive_deltas" or "positive_counts", the former for // regular histograms with integer counts, the latter for float // histograms. 
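For reference, the samples.go hunks above convert an integer histogram proto (which encodes per-bucket deltas) into a float histogram (which stores absolute bucket counts) via the new deltasToCounts helper. Below is a minimal standalone sketch of that delta-to-count conversion in plain Go, with no Thanos imports; the helper body mirrors the vendored code above, while the main function is illustrative only.

```go
package main

import "fmt"

// Integer histogram protos carry per-bucket *deltas*; float histograms carry
// absolute per-bucket counts. The conversion is a running sum over the deltas,
// matching the deltasToCounts helper added in samples.go above.
func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

func main() {
	// Buckets with absolute counts 3, 2, 4 are encoded as deltas 3, -1, 2.
	fmt.Println(deltasToCounts([]int64{3, -1, 2})) // [3 2 4]
}
```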
@@ -510,7 +510,7 @@ func (m *Histogram) GetZeroCountFloat() float64 { return 0 } -func (m *Histogram) GetNegativeSpans() []*BucketSpan { +func (m *Histogram) GetNegativeSpans() []BucketSpan { if m != nil { return m.NegativeSpans } @@ -531,7 +531,7 @@ func (m *Histogram) GetNegativeCounts() []float64 { return nil } -func (m *Histogram) GetPositiveSpans() []*BucketSpan { +func (m *Histogram) GetPositiveSpans() []BucketSpan { if m != nil { return m.PositiveSpans } @@ -988,79 +988,79 @@ func init() { func init() { proto.RegisterFile("store/storepb/prompb/types.proto", fileDescriptor_166e07899dab7c14) } var fileDescriptor_166e07899dab7c14 = []byte{ - // 1139 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x8f, 0x1a, 0xc7, - 0x13, 0x67, 0x66, 0x60, 0x60, 0x6a, 0x01, 0x8f, 0x5b, 0xfe, 0xdb, 0xe3, 0xfd, 0x27, 0x2c, 0x19, - 0xe5, 0x81, 0xac, 0x04, 0x24, 0x7b, 0x95, 0x5c, 0x36, 0x51, 0x96, 0x0d, 0xfb, 0x50, 0x02, 0xc8, - 0x0d, 0xab, 0xc4, 0xbe, 0xa0, 0x06, 0x7a, 0x99, 0xd1, 0x32, 0x0f, 0x4d, 0x37, 0xd6, 0x92, 0x4f, - 0x91, 0x73, 0x6e, 0x51, 0x6e, 0xc9, 0x29, 0x1f, 0x21, 0x37, 0x9f, 0x22, 0x1f, 0xa3, 0x1c, 0xac, - 0x68, 0xf7, 0x8b, 0x44, 0xdd, 0x33, 0xc3, 0xc0, 0x12, 0x4b, 0x39, 0xf9, 0x82, 0xaa, 0x7e, 0xf5, - 0xfa, 0x4d, 0x57, 0x75, 0x35, 0x50, 0x67, 0x3c, 0x88, 0x68, 0x4b, 0xfe, 0x86, 0xe3, 0x56, 0x18, - 0x05, 0x5e, 0x38, 0x6e, 0xf1, 0x65, 0x48, 0x59, 0x33, 0x8c, 0x02, 0x1e, 0xa0, 0x3b, 0x02, 0xa3, - 0xdc, 0xa1, 0x0b, 0x36, 0x9a, 0x04, 0xe1, 0x72, 0xf7, 0xde, 0x2c, 0x98, 0x05, 0xd2, 0xd6, 0x12, - 0x52, 0xec, 0xb6, 0xfb, 0x30, 0x4e, 0x34, 0x27, 0x63, 0x3a, 0xdf, 0xcc, 0x60, 0xff, 0xa4, 0x42, - 0xb5, 0x4b, 0x79, 0xe4, 0x4e, 0xba, 0x94, 0x93, 0x29, 0xe1, 0x04, 0x7d, 0x01, 0x79, 0xe1, 0x61, - 0x29, 0x75, 0xa5, 0x51, 0x7d, 0xfc, 0xa8, 0x79, 0xab, 0x46, 0x73, 0xd3, 0x3d, 0x51, 0x87, 0xcb, - 0x90, 0x62, 0x19, 0x87, 0x3e, 0x06, 0xe4, 0x49, 0x6c, 0x74, 0x41, 0x3c, 0x77, 0xbe, 0x1c, 0xf9, - 0xc4, 0xa3, 0x96, 0x5a, 0x57, 0x1a, 0x06, 0x36, 0x63, 0xcb, 0xb1, 0x34, 0xf4, 0x88, 0x47, 0x11, - 0x82, 0xbc, 0x43, 0xe7, 0xa1, 0x95, 0x97, 0x76, 0x29, 0x0b, 0x6c, 0xe1, 0xbb, 0xdc, 0x2a, 0xc4, - 0x98, 0x90, 0xed, 0x25, 0x40, 0x56, 0x09, 0xed, 0x40, 0xf1, 0xbc, 0xf7, 0x75, 0xaf, 0xff, 0x6d, - 0xcf, 0xcc, 0x09, 0xe5, 0xa8, 0x7f, 0xde, 0x1b, 0x76, 0xb0, 0xa9, 0x20, 0x03, 0x0a, 0x27, 0x87, - 0xe7, 0x27, 0x1d, 0x53, 0x45, 0x15, 0x30, 0x4e, 0xcf, 0x06, 0xc3, 0xfe, 0x09, 0x3e, 0xec, 0x9a, - 0x1a, 0x42, 0x50, 0x95, 0x96, 0x0c, 0xcb, 0x8b, 0xd0, 0xc1, 0x79, 0xb7, 0x7b, 0x88, 0x9f, 0x99, - 0x05, 0x54, 0x82, 0xfc, 0x59, 0xef, 0xb8, 0x6f, 0xea, 0xa8, 0x0c, 0xa5, 0xc1, 0xf0, 0x70, 0xd8, - 0x19, 0x74, 0x86, 0x66, 0xd1, 0x3e, 0x00, 0x7d, 0x40, 0xbc, 0x70, 0x4e, 0xd1, 0x3d, 0x28, 0xbc, - 0x20, 0xf3, 0x45, 0x7c, 0x36, 0x0a, 0x8e, 0x15, 0xf4, 0x0e, 0x18, 0xdc, 0xf5, 0x28, 0xe3, 0xc4, - 0x0b, 0xe5, 0x77, 0x6a, 0x38, 0x03, 0xec, 0x9f, 0x15, 0x28, 0x75, 0xae, 0xa8, 0x17, 0xce, 0x49, - 0x84, 0x26, 0xa0, 0xcb, 0x2e, 0x30, 0x4b, 0xa9, 0x6b, 0x8d, 0x9d, 0xc7, 0x95, 0x26, 0x77, 0x88, - 0x1f, 0xb0, 0xe6, 0x37, 0x02, 0x6d, 0x1f, 0xbc, 0x7c, 0xbd, 0x97, 0xfb, 0xeb, 0xf5, 0xde, 0xfe, - 0xcc, 0xe5, 0xce, 0x62, 0xdc, 0x9c, 0x04, 0x5e, 0x2b, 0x76, 0xf8, 0xc4, 0x0d, 0x12, 0xa9, 0x15, - 0x5e, 0xce, 0x5a, 0x1b, 0x0d, 0x6d, 0x3e, 0x97, 0xd1, 0x38, 0x49, 0x9d, 0xb1, 0x54, 0xdf, 0xc8, - 0x52, 0xbb, 0xcd, 0xf2, 0x8f, 0x02, 0x18, 0xa7, 0x2e, 0xe3, 0xc1, 0x2c, 0x22, 0x1e, 0x7a, 0x17, - 0x8c, 0x49, 0xb0, 0xf0, 0xf9, 0xc8, 0xf5, 0xb9, 0xfc, 0xd6, 0xfc, 0x69, 0x0e, 0x97, 0x24, 0x74, - 0xe6, 
0x73, 0xf4, 0x1e, 0xec, 0xc4, 0xe6, 0x8b, 0x79, 0x40, 0x78, 0x5c, 0xe6, 0x34, 0x87, 0x41, - 0x82, 0xc7, 0x02, 0x43, 0x26, 0x68, 0x6c, 0xe1, 0xc9, 0x3a, 0x0a, 0x16, 0x22, 0xba, 0x0f, 0x3a, - 0x9b, 0x38, 0xd4, 0x23, 0xb2, 0xd5, 0x77, 0x71, 0xa2, 0xa1, 0x0f, 0xa0, 0xfa, 0x3d, 0x8d, 0x82, - 0x11, 0x77, 0x22, 0xca, 0x9c, 0x60, 0x3e, 0x95, 0x6d, 0x57, 0x70, 0x45, 0xa0, 0xc3, 0x14, 0x44, - 0x1f, 0x26, 0x6e, 0x19, 0x2f, 0x5d, 0xf2, 0x52, 0x70, 0x59, 0xe0, 0x47, 0x29, 0xb7, 0x47, 0x60, - 0xae, 0xf9, 0xc5, 0x04, 0x8b, 0x92, 0xa0, 0x82, 0xab, 0x2b, 0xcf, 0x98, 0x64, 0x1b, 0xaa, 0x3e, - 0x9d, 0x11, 0xee, 0xbe, 0xa0, 0x23, 0x16, 0x12, 0x9f, 0x59, 0x25, 0xd9, 0x95, 0xff, 0x6f, 0xcd, - 0x7c, 0x7b, 0x31, 0xb9, 0xa4, 0x7c, 0x10, 0x12, 0x1f, 0x57, 0xd2, 0x10, 0xa1, 0x31, 0xf4, 0x11, - 0xdc, 0x59, 0xe5, 0x98, 0xd2, 0x39, 0x27, 0xcc, 0x32, 0xea, 0x5a, 0x03, 0xe1, 0x55, 0xea, 0xaf, - 0x24, 0xba, 0xe1, 0x28, 0xc9, 0x31, 0x0b, 0xea, 0x5a, 0x43, 0xc9, 0x1c, 0x25, 0x33, 0x26, 0x58, - 0x85, 0x01, 0x73, 0xd7, 0x58, 0xed, 0xfc, 0x07, 0x56, 0x69, 0xc8, 0x8a, 0xd5, 0x2a, 0x47, 0xc2, - 0xaa, 0x1c, 0xb3, 0x4a, 0xe1, 0x8c, 0xd5, 0xca, 0x31, 0x61, 0x55, 0x89, 0x59, 0xa5, 0x70, 0xc2, - 0xea, 0x08, 0x20, 0xa2, 0x8c, 0xf2, 0x91, 0x23, 0xce, 0xbe, 0x2a, 0x77, 0xc3, 0xfb, 0x5b, 0x8c, - 0x56, 0x23, 0xd4, 0xc4, 0xc2, 0xf9, 0xd4, 0xf5, 0x39, 0x36, 0xa2, 0x54, 0xdc, 0x9c, 0xc1, 0x3b, - 0xb7, 0x67, 0x70, 0x1f, 0x8c, 0x55, 0xd4, 0xe6, 0x0d, 0x2f, 0x82, 0xf6, 0xac, 0x33, 0x30, 0x15, - 0xa4, 0x83, 0xda, 0xeb, 0x9b, 0x6a, 0x76, 0xcb, 0xb5, 0x76, 0x11, 0x0a, 0x92, 0x78, 0xbb, 0x0c, - 0x90, 0x75, 0xde, 0x3e, 0x00, 0xc8, 0x8e, 0x47, 0x0c, 0x5f, 0x70, 0x71, 0xc1, 0x68, 0x3c, 0xcd, - 0x77, 0x71, 0xa2, 0x09, 0x7c, 0x4e, 0xfd, 0x19, 0x77, 0xe4, 0x10, 0x57, 0x70, 0xa2, 0xd9, 0xbf, - 0xaa, 0x00, 0x43, 0xd7, 0xa3, 0x03, 0x1a, 0xb9, 0x94, 0xbd, 0x9d, 0x6b, 0xfb, 0x19, 0x14, 0x99, - 0x5c, 0x33, 0xcc, 0x52, 0x65, 0x95, 0x07, 0x5b, 0xc7, 0x1b, 0xaf, 0xa1, 0x76, 0x5e, 0xd4, 0xc3, - 0xa9, 0x37, 0xfa, 0x1c, 0x0c, 0x9a, 0x2c, 0x18, 0x66, 0x69, 0x32, 0xf4, 0xe1, 0x56, 0x68, 0xba, - 0x82, 0x92, 0xe0, 0x2c, 0x02, 0x7d, 0x09, 0xe0, 0xa4, 0x6d, 0x63, 0x56, 0x5e, 0xc6, 0xef, 0xbe, - 0xb9, 0xb3, 0x49, 0x82, 0xb5, 0x18, 0xfb, 0x47, 0x05, 0xca, 0xf2, 0x5b, 0xba, 0x84, 0x4f, 0x1c, - 0x1a, 0xa1, 0x4f, 0x37, 0x9e, 0x10, 0x7b, 0x2b, 0xd9, 0xba, 0x73, 0x73, 0xed, 0xe9, 0x40, 0x90, - 0x5f, 0x7b, 0x2c, 0xa4, 0x9c, 0x6d, 0x33, 0x4d, 0x82, 0xb1, 0x62, 0x37, 0x20, 0x2f, 0x1f, 0x02, - 0x1d, 0xd4, 0xce, 0xd3, 0x78, 0x42, 0x7a, 0x9d, 0xa7, 0xf1, 0x84, 0x60, 0xb1, 0xfc, 0x05, 0x80, - 0x3b, 0xa6, 0x66, 0xff, 0xa6, 0x88, 0xb1, 0x22, 0x53, 0x31, 0x55, 0x0c, 0x3d, 0x80, 0x22, 0xe3, - 0x34, 0x1c, 0x79, 0x4c, 0x92, 0xd3, 0xb0, 0x2e, 0xd4, 0x2e, 0x13, 0xa5, 0x2f, 0x16, 0xfe, 0x24, - 0x2d, 0x2d, 0x64, 0xf4, 0x10, 0x4a, 0x8c, 0x93, 0x88, 0x0b, 0xef, 0x78, 0x63, 0x16, 0xa5, 0xde, - 0x65, 0xe8, 0x7f, 0xa0, 0x53, 0x7f, 0x3a, 0x92, 0x07, 0x26, 0x0c, 0x05, 0xea, 0x4f, 0xbb, 0x0c, - 0xed, 0x42, 0x69, 0x16, 0x05, 0x8b, 0xd0, 0xf5, 0x67, 0x56, 0xa1, 0xae, 0x35, 0x0c, 0xbc, 0xd2, - 0x51, 0x15, 0xd4, 0xf1, 0x52, 0x6e, 0xad, 0x12, 0x56, 0xc7, 0x4b, 0x91, 0x3d, 0x22, 0xfe, 0x8c, - 0x8a, 0x24, 0xc5, 0x38, 0xbb, 0xd4, 0xbb, 0xcc, 0xfe, 0x5d, 0x81, 0xc2, 0x91, 0xb3, 0xf0, 0x2f, - 0x51, 0x0d, 0x76, 0x3c, 0xd7, 0x1f, 0x89, 0x4b, 0x92, 0x71, 0x36, 0x3c, 0xd7, 0x17, 0xd3, 0xd9, - 0x65, 0xd2, 0x4e, 0xae, 0x56, 0xf6, 0xe4, 0xf5, 0xf1, 0xc8, 0x55, 0x62, 0x7f, 0x92, 0x74, 0x42, - 0x93, 0x9d, 0xd8, 0xdb, 0xea, 0x84, 0xac, 0xd2, 0xec, 0xf8, 0x93, 0x60, 0xea, 0xfa, 0xb3, 0xac, - 0x0d, 0xe2, 0x69, 0x97, 0x9f, 
0x56, 0xc6, 0x52, 0xb6, 0x5b, 0x50, 0x4a, 0xbd, 0xb6, 0xee, 0xe6, - 0x77, 0x7d, 0xf1, 0xf2, 0x6e, 0x3c, 0xb7, 0xaa, 0xfd, 0x8b, 0x02, 0x15, 0x99, 0x9d, 0x4e, 0xdf, - 0xe6, 0x2d, 0xda, 0x07, 0x7d, 0x22, 0xaa, 0xa6, 0x97, 0xe8, 0xfe, 0xbf, 0x7f, 0x72, 0x32, 0xc5, - 0x89, 0x6f, 0xbb, 0xfe, 0xf2, 0xba, 0xa6, 0xbc, 0xba, 0xae, 0x29, 0x7f, 0x5f, 0xd7, 0x94, 0x1f, - 0x6e, 0x6a, 0xb9, 0x57, 0x37, 0xb5, 0xdc, 0x9f, 0x37, 0xb5, 0xdc, 0x73, 0x3d, 0xfe, 0xdb, 0x35, - 0xd6, 0xe5, 0xff, 0xa5, 0x27, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x67, 0xfa, 0x2f, 0x95, - 0x09, 0x00, 0x00, + // 1140 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x8f, 0xda, 0xc6, + 0x17, 0xc7, 0x36, 0x18, 0xfc, 0x16, 0x88, 0x33, 0xca, 0x37, 0x71, 0xf6, 0xdb, 0x12, 0x6a, 0xf5, + 0x07, 0x8a, 0x5a, 0x90, 0x92, 0xa8, 0xbd, 0x6c, 0xab, 0x2e, 0x5b, 0x76, 0x59, 0xb5, 0x80, 0x32, + 0xb0, 0x6a, 0x93, 0x0b, 0x1a, 0x60, 0x16, 0x5b, 0x8b, 0x7f, 0xc8, 0x33, 0x44, 0x4b, 0xff, 0x8a, + 0x9e, 0x7b, 0xab, 0x7a, 0x6b, 0x4f, 0xfd, 0x13, 0x7a, 0xcb, 0x31, 0xc7, 0x2a, 0x87, 0xa8, 0xda, + 0xfd, 0x47, 0xaa, 0x19, 0xdb, 0x18, 0x96, 0x46, 0xea, 0x29, 0x17, 0xf4, 0xde, 0xe7, 0xfd, 0xfa, + 0x8c, 0xdf, 0x9b, 0x79, 0x40, 0x9d, 0xf1, 0x20, 0xa2, 0x2d, 0xf9, 0x1b, 0x4e, 0x5a, 0x61, 0x14, + 0x78, 0xe1, 0xa4, 0xc5, 0x57, 0x21, 0x65, 0xcd, 0x30, 0x0a, 0x78, 0x80, 0x6e, 0x09, 0x8c, 0x72, + 0x87, 0x2e, 0xd9, 0x78, 0x1a, 0x84, 0xab, 0xfd, 0x3b, 0xf3, 0x60, 0x1e, 0x48, 0x5b, 0x4b, 0x48, + 0xb1, 0xdb, 0xfe, 0xfd, 0x38, 0xd1, 0x82, 0x4c, 0xe8, 0x62, 0x3b, 0x83, 0xfd, 0x8b, 0x0a, 0xd5, + 0x1e, 0xe5, 0x91, 0x3b, 0xed, 0x51, 0x4e, 0x66, 0x84, 0x13, 0xf4, 0x15, 0xe4, 0x85, 0x87, 0xa5, + 0xd4, 0x95, 0x46, 0xf5, 0xd1, 0xc3, 0xe6, 0x8d, 0x1a, 0xcd, 0x6d, 0xf7, 0x44, 0x1d, 0xad, 0x42, + 0x8a, 0x65, 0x1c, 0xfa, 0x14, 0x90, 0x27, 0xb1, 0xf1, 0x39, 0xf1, 0xdc, 0xc5, 0x6a, 0xec, 0x13, + 0x8f, 0x5a, 0x6a, 0x5d, 0x69, 0x18, 0xd8, 0x8c, 0x2d, 0xc7, 0xd2, 0xd0, 0x27, 0x1e, 0x45, 0x08, + 0xf2, 0x0e, 0x5d, 0x84, 0x56, 0x5e, 0xda, 0xa5, 0x2c, 0xb0, 0xa5, 0xef, 0x72, 0xab, 0x10, 0x63, + 0x42, 0xb6, 0x57, 0x00, 0x59, 0x25, 0xb4, 0x07, 0xc5, 0xb3, 0xfe, 0xb7, 0xfd, 0xc1, 0xf7, 0x7d, + 0x33, 0x27, 0x94, 0xa3, 0xc1, 0x59, 0x7f, 0xd4, 0xc1, 0xa6, 0x82, 0x0c, 0x28, 0x9c, 0x1c, 0x9e, + 0x9d, 0x74, 0x4c, 0x15, 0x55, 0xc0, 0xe8, 0x9e, 0x0e, 0x47, 0x83, 0x13, 0x7c, 0xd8, 0x33, 0x35, + 0x84, 0xa0, 0x2a, 0x2d, 0x19, 0x96, 0x17, 0xa1, 0xc3, 0xb3, 0x5e, 0xef, 0x10, 0x3f, 0x33, 0x0b, + 0xa8, 0x04, 0xf9, 0xd3, 0xfe, 0xf1, 0xc0, 0xd4, 0x51, 0x19, 0x4a, 0xc3, 0xd1, 0xe1, 0xa8, 0x33, + 0xec, 0x8c, 0xcc, 0xa2, 0x7d, 0x00, 0xfa, 0x90, 0x78, 0xe1, 0x82, 0xa2, 0x3b, 0x50, 0x78, 0x41, + 0x16, 0xcb, 0xf8, 0xdb, 0x28, 0x38, 0x56, 0xd0, 0x7b, 0x60, 0x70, 0xd7, 0xa3, 0x8c, 0x13, 0x2f, + 0x94, 0xe7, 0xd4, 0x70, 0x06, 0xd8, 0xbf, 0x2a, 0x50, 0xea, 0x5c, 0x52, 0x2f, 0x5c, 0x90, 0x08, + 0x4d, 0x41, 0x97, 0x5d, 0x60, 0x96, 0x52, 0xd7, 0x1a, 0x7b, 0x8f, 0x2a, 0x4d, 0xee, 0x10, 0x3f, + 0x60, 0xcd, 0xef, 0x04, 0xda, 0x3e, 0x78, 0xf9, 0xe6, 0x41, 0xee, 0xf5, 0x9b, 0x07, 0x4f, 0xe6, + 0x2e, 0x77, 0x96, 0x93, 0xe6, 0x34, 0xf0, 0x5a, 0xb1, 0xc3, 0x67, 0x6e, 0x90, 0x48, 0xad, 0xf0, + 0x62, 0xde, 0xda, 0x6a, 0x68, 0xf3, 0xb9, 0x8c, 0xc6, 0x49, 0xea, 0x8c, 0xa5, 0xfa, 0x56, 0x96, + 0xda, 0x4d, 0x96, 0xaf, 0x0b, 0x60, 0x74, 0x5d, 0xc6, 0x83, 0x79, 0x44, 0x3c, 0xf4, 0x3e, 0x18, + 0xd3, 0x60, 0xe9, 0xf3, 0xb1, 0xeb, 0x73, 0x79, 0xd6, 0x7c, 0x37, 0x87, 0x4b, 0x12, 0x3a, 0xf5, + 0x39, 0xfa, 0x00, 0xf6, 0x62, 0xf3, 0xf9, 0x22, 0x20, 0x3c, 0x2e, 0xd3, 0xcd, 0x61, 
0x90, 0xe0, + 0xb1, 0xc0, 0x90, 0x09, 0x1a, 0x5b, 0x7a, 0xb2, 0x8e, 0x82, 0x85, 0x88, 0xee, 0x82, 0xce, 0xa6, + 0x0e, 0xf5, 0x88, 0x6c, 0xf5, 0x6d, 0x9c, 0x68, 0xe8, 0x23, 0xa8, 0xfe, 0x48, 0xa3, 0x60, 0xcc, + 0x9d, 0x88, 0x32, 0x27, 0x58, 0xcc, 0x64, 0xdb, 0x15, 0x5c, 0x11, 0xe8, 0x28, 0x05, 0xd1, 0xc7, + 0x89, 0x5b, 0xc6, 0x4b, 0x97, 0xbc, 0x14, 0x5c, 0x16, 0xf8, 0x51, 0xca, 0xed, 0x21, 0x98, 0x1b, + 0x7e, 0x31, 0xc1, 0xa2, 0x24, 0xa8, 0xe0, 0xea, 0xda, 0x33, 0x26, 0xd9, 0x85, 0xaa, 0x4f, 0xe7, + 0x84, 0xbb, 0x2f, 0xe8, 0x98, 0x85, 0xc4, 0x67, 0x56, 0x49, 0x76, 0xe5, 0xff, 0x3b, 0x33, 0xdf, + 0x5e, 0x4e, 0x2f, 0x28, 0x1f, 0x86, 0xc4, 0x6f, 0xe7, 0x45, 0x8f, 0x70, 0x25, 0x0d, 0x14, 0x18, + 0x43, 0x9f, 0xc0, 0xad, 0x75, 0xa6, 0x19, 0x5d, 0x70, 0xc2, 0x2c, 0xa3, 0xae, 0x35, 0x10, 0x5e, + 0x17, 0xf8, 0x46, 0xa2, 0x5b, 0x8e, 0x92, 0x22, 0xb3, 0xa0, 0xae, 0x35, 0x94, 0xcc, 0x51, 0xf2, + 0x63, 0x82, 0x5b, 0x18, 0x30, 0x77, 0x83, 0xdb, 0xde, 0x7f, 0xe6, 0x96, 0x06, 0xae, 0xb9, 0xad, + 0x33, 0x25, 0xdc, 0xca, 0x31, 0xb7, 0x14, 0xce, 0xb8, 0xad, 0x1d, 0x13, 0x6e, 0x95, 0x98, 0x5b, + 0x0a, 0x27, 0xdc, 0x8e, 0x00, 0x22, 0xca, 0x28, 0x1f, 0x3b, 0xa2, 0x0f, 0x55, 0xf9, 0x4e, 0x7c, + 0xb8, 0xc3, 0x6b, 0x3d, 0x4e, 0x4d, 0x2c, 0x9c, 0xbb, 0xae, 0xcf, 0xb1, 0x11, 0xa5, 0xe2, 0xf6, + 0x3c, 0xde, 0xba, 0x39, 0x8f, 0x4f, 0xc0, 0x58, 0x47, 0x6d, 0xdf, 0xf6, 0x22, 0x68, 0xcf, 0x3a, + 0x43, 0x53, 0x41, 0x3a, 0xa8, 0xfd, 0x81, 0xa9, 0x66, 0x37, 0x5e, 0x6b, 0x17, 0xa1, 0x20, 0x89, + 0xb7, 0xcb, 0x00, 0xd9, 0x14, 0xd8, 0x07, 0x00, 0xd9, 0x47, 0x12, 0x83, 0x18, 0x9c, 0x9f, 0x33, + 0x1a, 0x4f, 0xf6, 0x6d, 0x9c, 0x68, 0x02, 0x5f, 0x50, 0x7f, 0xce, 0x1d, 0x39, 0xd0, 0x15, 0x9c, + 0x68, 0xf6, 0xef, 0x2a, 0xc0, 0xc8, 0xf5, 0xe8, 0x90, 0x46, 0x2e, 0x65, 0xef, 0xe6, 0x0a, 0x7f, + 0x01, 0x45, 0x26, 0x9f, 0x1c, 0x66, 0xa9, 0xb2, 0xca, 0xbd, 0x9d, 0xcf, 0x1b, 0x3f, 0x49, 0x49, + 0xcb, 0x53, 0x6f, 0xf4, 0x25, 0x18, 0x34, 0x79, 0x6c, 0x98, 0xa5, 0xc9, 0xd0, 0xfb, 0x3b, 0xa1, + 0xe9, 0x73, 0x94, 0x04, 0x67, 0x11, 0xe8, 0x6b, 0x00, 0x27, 0x6d, 0x1b, 0xb3, 0xf2, 0x32, 0x7e, + 0xff, 0xed, 0x9d, 0x4d, 0x12, 0x6c, 0xc4, 0xd8, 0x3f, 0x2b, 0x50, 0x96, 0x67, 0xe9, 0x11, 0x3e, + 0x75, 0x68, 0x84, 0x3e, 0xdf, 0x5a, 0x27, 0xf6, 0x4e, 0xb2, 0x4d, 0xe7, 0xe6, 0xc6, 0x1a, 0x41, + 0x90, 0xdf, 0x58, 0x1c, 0x52, 0xce, 0x5e, 0x36, 0x4d, 0x82, 0xb1, 0x62, 0x37, 0x20, 0x2f, 0x97, + 0x82, 0x0e, 0x6a, 0xe7, 0x69, 0x3c, 0x21, 0xfd, 0xce, 0xd3, 0x78, 0x42, 0xb0, 0x58, 0x04, 0x02, + 0xc0, 0x1d, 0x53, 0xb3, 0xff, 0x50, 0xc4, 0x58, 0x91, 0x99, 0x98, 0x2a, 0x86, 0xee, 0x41, 0x91, + 0x71, 0x1a, 0x8e, 0x3d, 0x26, 0xc9, 0x69, 0x58, 0x17, 0x6a, 0x8f, 0x89, 0xd2, 0xe7, 0x4b, 0x7f, + 0x9a, 0x96, 0x16, 0x32, 0xba, 0x0f, 0x25, 0xc6, 0x49, 0xc4, 0x85, 0x77, 0xfc, 0x7a, 0x16, 0xa5, + 0xde, 0x63, 0xe8, 0x7f, 0xa0, 0x53, 0x7f, 0x36, 0x96, 0x1f, 0x4c, 0x18, 0x0a, 0xd4, 0x9f, 0xf5, + 0x18, 0xda, 0x87, 0xd2, 0x3c, 0x0a, 0x96, 0xa1, 0xeb, 0xcf, 0xad, 0x42, 0x5d, 0x6b, 0x18, 0x78, + 0xad, 0xa3, 0x2a, 0xa8, 0x93, 0x95, 0x7c, 0xc1, 0x4a, 0x58, 0x9d, 0xac, 0x44, 0xf6, 0x88, 0xf8, + 0x73, 0x2a, 0x92, 0x14, 0xe3, 0xec, 0x52, 0xef, 0x31, 0xfb, 0x4f, 0x05, 0x0a, 0x47, 0xce, 0xd2, + 0xbf, 0x40, 0x35, 0xd8, 0xf3, 0x5c, 0x7f, 0x2c, 0x2e, 0x49, 0xc6, 0xd9, 0xf0, 0x5c, 0x5f, 0x4c, + 0x67, 0x8f, 0x49, 0x3b, 0xb9, 0x5c, 0xdb, 0x93, 0x4d, 0xe4, 0x91, 0xcb, 0xc4, 0xfe, 0x38, 0xe9, + 0x84, 0x26, 0x3b, 0xf1, 0x60, 0xa7, 0x13, 0xb2, 0x4a, 0xb3, 0xe3, 0x4f, 0x83, 0x99, 0xeb, 0xcf, + 0xb3, 0x36, 0x88, 0x35, 0x2f, 0x8f, 0x56, 0xc6, 0x52, 0xb6, 0x5b, 0x50, 0x4a, 0xbd, 0x76, 0xee, + 0xe6, 0x0f, 
0x03, 0xb1, 0x85, 0xb7, 0x56, 0xaf, 0x6a, 0xff, 0xa6, 0x40, 0x45, 0x66, 0xa7, 0xb3, + 0x77, 0x79, 0x8b, 0x9e, 0x80, 0x3e, 0x15, 0x55, 0xd3, 0x4b, 0x74, 0xf7, 0xdf, 0x8f, 0x9c, 0x4c, + 0x71, 0xe2, 0xdb, 0xae, 0xbf, 0xbc, 0xaa, 0x29, 0xaf, 0xae, 0x6a, 0xca, 0xdf, 0x57, 0x35, 0xe5, + 0xa7, 0xeb, 0x5a, 0xee, 0xd5, 0x75, 0x2d, 0xf7, 0xd7, 0x75, 0x2d, 0xf7, 0x5c, 0x8f, 0xff, 0x82, + 0x4d, 0x74, 0xf9, 0xdf, 0xe9, 0xf1, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x98, 0x09, 0x9b, 0xd5, + 0xa1, 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -2559,7 +2559,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NegativeSpans = append(m.NegativeSpans, &BucketSpan{}) + m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2725,7 +2725,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PositiveSpans = append(m.PositiveSpans, &BucketSpan{}) + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto index 0fea825508b..bf00b62e8fc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto @@ -94,9 +94,9 @@ message Histogram { uint64 zero_count_int = 6; double zero_count_float = 7; } - + // Negative Buckets. - repeated BucketSpan negative_spans = 8; + repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; // Use either "negative_deltas" or "negative_counts", the former for // regular histograms with integer counts, the latter for float // histograms. @@ -104,7 +104,7 @@ message Histogram { repeated double negative_counts = 10; // Absolute count of each bucket. // Positive Buckets. - repeated BucketSpan positive_spans = 11; + repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; // Use either "positive_deltas" or "positive_counts", the former for // regular histograms with integer counts, the latter for float // histograms. 
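
Context for the hunk above (not part of the diff): marking `negative_spans`/`positive_spans` with `[(gogoproto.nullable) = false]` makes gogo/protobuf generate value slices (`[]BucketSpan`) instead of pointer slices (`[]*BucketSpan`), which is why the `Unmarshal` changes now append `BucketSpan{}` rather than `&BucketSpan{}`. A minimal sketch of what this looks like to calling code, using hand-written stand-ins for the generated types (names mirror the .proto but this is illustrative, not the real generated file):

```go
package main

import "fmt"

// Illustrative stand-ins for the generated prompb types.
type BucketSpan struct {
	Offset int32
	Length uint32
}

type Histogram struct {
	// With [(gogoproto.nullable) = false] the generated field is a value
	// slice ([]BucketSpan); without the option it would be []*BucketSpan.
	NegativeSpans []BucketSpan
	PositiveSpans []BucketSpan
}

func main() {
	h := Histogram{}
	// Matches the updated Unmarshal code: append a value, not a pointer,
	// so spans are stored inline with no per-element heap allocation.
	h.PositiveSpans = append(h.PositiveSpans, BucketSpan{Offset: 0, Length: 2})
	fmt.Println(len(h.PositiveSpans), h.PositiveSpans[0].Length)
}
```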
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go index a87a135ba0a..401dcad2dc9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go @@ -63,18 +63,21 @@ func (PartialResponseStrategy) EnumDescriptor() ([]byte, []int) { type Chunk_Encoding int32 const ( - Chunk_XOR Chunk_Encoding = 0 - Chunk_HISTOGRAM Chunk_Encoding = 1 + Chunk_XOR Chunk_Encoding = 0 + Chunk_HISTOGRAM Chunk_Encoding = 1 + Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 2 ) var Chunk_Encoding_name = map[int32]string{ 0: "XOR", 1: "HISTOGRAM", + 2: "FLOAT_HISTOGRAM", } var Chunk_Encoding_value = map[string]int32{ - "XOR": 0, - "HISTOGRAM": 1, + "XOR": 0, + "HISTOGRAM": 1, + "FLOAT_HISTOGRAM": 2, } func (x Chunk_Encoding) String() string { @@ -290,42 +293,43 @@ func init() { func init() { proto.RegisterFile("store/storepb/types.proto", fileDescriptor_121fba57de02d8e0) } var fileDescriptor_121fba57de02d8e0 = []byte{ - // 553 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xcf, 0x6e, 0xd3, 0x4e, - 0x10, 0xc7, 0xbd, 0xb6, 0xe3, 0x24, 0xf3, 0x6b, 0x7f, 0x32, 0xab, 0x0a, 0xdc, 0x1e, 0x9c, 0xc8, - 0x08, 0x11, 0x55, 0xaa, 0x2d, 0x15, 0x8e, 0x5c, 0x12, 0x14, 0x01, 0x12, 0x6d, 0xe8, 0x26, 0x12, - 0xa8, 0x17, 0xb4, 0x71, 0x57, 0xb6, 0xd5, 0xf8, 0x8f, 0xbc, 0x6b, 0x48, 0x1e, 0x80, 0x3b, 0x88, - 0x3b, 0xcf, 0x93, 0x63, 0x8f, 0x88, 0x43, 0x04, 0xc9, 0x8b, 0x20, 0xaf, 0x1d, 0x20, 0x52, 0x2e, - 0xd6, 0x78, 0x3e, 0xdf, 0x99, 0xd9, 0x99, 0x9d, 0x85, 0x63, 0x2e, 0xd2, 0x9c, 0x79, 0xf2, 0x9b, - 0x4d, 0x3d, 0xb1, 0xc8, 0x18, 0x77, 0xb3, 0x3c, 0x15, 0x29, 0x36, 0x44, 0x48, 0x93, 0x94, 0x9f, - 0x1c, 0x05, 0x69, 0x90, 0x4a, 0x97, 0x57, 0x5a, 0x15, 0x3d, 0xa9, 0x03, 0x67, 0x74, 0xca, 0x66, - 0xbb, 0x81, 0xce, 0x27, 0x04, 0x8d, 0xe7, 0x61, 0x91, 0xdc, 0xe2, 0x53, 0xd0, 0x4b, 0x60, 0xa1, - 0x2e, 0xea, 0xfd, 0x7f, 0x7e, 0xdf, 0xad, 0x32, 0xba, 0x12, 0xba, 0xc3, 0xc4, 0x4f, 0x6f, 0xa2, - 0x24, 0x20, 0x52, 0x83, 0x31, 0xe8, 0x37, 0x54, 0x50, 0x4b, 0xed, 0xa2, 0xde, 0x01, 0x91, 0x36, - 0xb6, 0x40, 0x0f, 0x29, 0x0f, 0x2d, 0xad, 0x8b, 0x7a, 0xfa, 0x40, 0x5f, 0xae, 0x3a, 0x88, 0x48, - 0x8f, 0xe3, 0x40, 0x6b, 0x1b, 0x8f, 0x9b, 0xa0, 0xbd, 0x1b, 0x11, 0x53, 0xc1, 0x87, 0xd0, 0x7e, - 0xf9, 0x6a, 0x3c, 0x19, 0xbd, 0x20, 0xfd, 0x0b, 0x13, 0x39, 0xdf, 0x10, 0x18, 0x63, 0x96, 0x47, - 0x8c, 0x63, 0x1f, 0x0c, 0x79, 0x52, 0x6e, 0xa1, 0xae, 0xd6, 0xfb, 0xef, 0xfc, 0x70, 0x7b, 0x94, - 0xd7, 0xa5, 0x77, 0xf0, 0x6c, 0xb9, 0xea, 0x28, 0x3f, 0x56, 0x9d, 0xa7, 0x41, 0x24, 0xc2, 0x62, - 0xea, 0xfa, 0x69, 0xec, 0x55, 0x82, 0xb3, 0x28, 0xad, 0x2d, 0x2f, 0xbb, 0x0d, 0xbc, 0x9d, 0xa6, - 0xdd, 0x6b, 0x19, 0x4d, 0xea, 0xd4, 0xd8, 0x03, 0xc3, 0x2f, 0x3b, 0xe3, 0x96, 0x2a, 0x8b, 0xdc, - 0xdb, 0x16, 0xe9, 0x07, 0x41, 0x2e, 0x7b, 0x96, 0x2d, 0x28, 0xa4, 0x96, 0x39, 0x5f, 0x55, 0x68, - 0xff, 0x61, 0xf8, 0x18, 0x5a, 0x71, 0x94, 0xbc, 0x17, 0x51, 0x5c, 0x0d, 0x4c, 0x23, 0xcd, 0x38, - 0x4a, 0x26, 0x51, 0xcc, 0x24, 0xa2, 0xf3, 0x0a, 0xa9, 0x35, 0xa2, 0x73, 0x89, 0x3a, 0xa0, 0xe5, - 0xf4, 0xa3, 0x9c, 0xd0, 0x3f, 0x6d, 0xc9, 0x8c, 0xa4, 0x24, 0xf8, 0x21, 0x34, 0xfc, 0xb4, 0x48, - 0x84, 0xa5, 0xef, 0x93, 0x54, 0xac, 0xcc, 0xc2, 0x8b, 0xd8, 0x6a, 0xec, 0xcd, 0xc2, 0x8b, 0xb8, - 0x14, 0xc4, 0x51, 0x62, 0x19, 0x7b, 0x05, 0x71, 0x94, 0x48, 0x01, 0x9d, 0x5b, 0xcd, 0xfd, 0x02, - 0x3a, 0xc7, 0x8f, 0xa1, 0x29, 0x6b, 0xb1, 0xdc, 0x6a, 0xed, 0x13, 0x6d, 0xa9, 
0xf3, 0x05, 0xc1, - 0x81, 0x1c, 0xec, 0x05, 0x15, 0x7e, 0xc8, 0x72, 0x7c, 0xb6, 0xb3, 0x45, 0xc7, 0x3b, 0x57, 0x57, - 0x6b, 0xdc, 0xc9, 0x22, 0x63, 0x7f, 0x17, 0x29, 0xa1, 0xf5, 0xa0, 0xda, 0x44, 0xda, 0xf8, 0x08, - 0x1a, 0x1f, 0xe8, 0xac, 0x60, 0x72, 0x4e, 0x6d, 0x52, 0xfd, 0x38, 0x3d, 0xd0, 0xcb, 0x38, 0x6c, - 0x80, 0x3a, 0xbc, 0x32, 0x95, 0x72, 0x91, 0x2e, 0x87, 0x57, 0x26, 0x2a, 0x1d, 0x64, 0x68, 0xaa, - 0xd2, 0x41, 0x86, 0xa6, 0x76, 0xea, 0xc2, 0x83, 0x37, 0x34, 0x17, 0x11, 0x9d, 0x11, 0xc6, 0xb3, - 0x34, 0xe1, 0x6c, 0x2c, 0x72, 0x2a, 0x58, 0xb0, 0xc0, 0x2d, 0xd0, 0xdf, 0xf6, 0xc9, 0xa5, 0xa9, - 0xe0, 0x36, 0x34, 0xfa, 0x83, 0x11, 0x99, 0x98, 0x68, 0xf0, 0x68, 0xf9, 0xcb, 0x56, 0x96, 0x6b, - 0x1b, 0xdd, 0xad, 0x6d, 0xf4, 0x73, 0x6d, 0xa3, 0xcf, 0x1b, 0x5b, 0xb9, 0xdb, 0xd8, 0xca, 0xf7, - 0x8d, 0xad, 0x5c, 0x37, 0xeb, 0xe7, 0x36, 0x35, 0xe4, 0x83, 0x79, 0xf2, 0x3b, 0x00, 0x00, 0xff, - 0xff, 0x44, 0x9d, 0x95, 0xa2, 0x86, 0x03, 0x00, 0x00, + // 565 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xf5, 0xda, 0x8e, 0x93, 0x0c, 0x2d, 0x98, 0xa5, 0x02, 0xb7, 0x07, 0x27, 0x32, 0x42, 0x44, + 0x95, 0x6a, 0x4b, 0x05, 0x89, 0x0b, 0x97, 0x04, 0x85, 0x0f, 0xa9, 0x6d, 0xe8, 0x26, 0x12, 0xa8, + 0x97, 0x6a, 0xe3, 0xae, 0x6c, 0xab, 0xf1, 0x87, 0xec, 0x35, 0x24, 0xff, 0x02, 0xc4, 0x8d, 0x03, + 0xbf, 0x27, 0xc7, 0x1e, 0x11, 0x87, 0x08, 0x92, 0x3f, 0x82, 0xbc, 0x76, 0x28, 0x91, 0x72, 0xb1, + 0xc6, 0xef, 0xbd, 0x99, 0xd9, 0x79, 0x3b, 0x0b, 0xfb, 0x19, 0x8f, 0x53, 0xe6, 0x88, 0x6f, 0x32, + 0x76, 0xf8, 0x2c, 0x61, 0x99, 0x9d, 0xa4, 0x31, 0x8f, 0xb1, 0xc6, 0x7d, 0x1a, 0xc5, 0xd9, 0xc1, + 0x9e, 0x17, 0x7b, 0xb1, 0x80, 0x9c, 0x22, 0x2a, 0xd9, 0x83, 0x2a, 0x71, 0x42, 0xc7, 0x6c, 0xb2, + 0x99, 0x68, 0x7d, 0x47, 0x50, 0x7b, 0xe5, 0xe7, 0xd1, 0x35, 0x3e, 0x04, 0xb5, 0x20, 0x0c, 0xd4, + 0x46, 0x9d, 0xbb, 0xc7, 0x0f, 0xed, 0xb2, 0xa2, 0x2d, 0x48, 0xbb, 0x1f, 0xb9, 0xf1, 0x55, 0x10, + 0x79, 0x44, 0x68, 0x30, 0x06, 0xf5, 0x8a, 0x72, 0x6a, 0xc8, 0x6d, 0xd4, 0xd9, 0x21, 0x22, 0xc6, + 0x06, 0xa8, 0x3e, 0xcd, 0x7c, 0x43, 0x69, 0xa3, 0x8e, 0xda, 0x53, 0xe7, 0x8b, 0x16, 0x22, 0x02, + 0xb1, 0x5e, 0x40, 0x63, 0x9d, 0x8f, 0xeb, 0xa0, 0x7c, 0x1c, 0x10, 0x5d, 0xc2, 0xbb, 0xd0, 0x7c, + 0xfb, 0x6e, 0x38, 0x1a, 0xbc, 0x21, 0xdd, 0x53, 0x1d, 0xe1, 0x07, 0x70, 0xef, 0xf5, 0xc9, 0xa0, + 0x3b, 0xba, 0xbc, 0x05, 0x65, 0xeb, 0x07, 0x02, 0x6d, 0xc8, 0xd2, 0x80, 0x65, 0xd8, 0x05, 0x4d, + 0x1c, 0x3f, 0x33, 0x50, 0x5b, 0xe9, 0xdc, 0x39, 0xde, 0x5d, 0x9f, 0xef, 0xa4, 0x40, 0x7b, 0x2f, + 0xe7, 0x8b, 0x96, 0xf4, 0x6b, 0xd1, 0x7a, 0xee, 0x05, 0xdc, 0xcf, 0xc7, 0xb6, 0x1b, 0x87, 0x4e, + 0x29, 0x38, 0x0a, 0xe2, 0x2a, 0x72, 0x92, 0x6b, 0xcf, 0xd9, 0x70, 0xc2, 0xbe, 0x10, 0xd9, 0xa4, + 0x2a, 0x8d, 0x1d, 0xd0, 0xdc, 0x62, 0xdc, 0xcc, 0x90, 0x45, 0x93, 0xfb, 0xeb, 0x26, 0x5d, 0xcf, + 0x4b, 0x85, 0x11, 0x62, 0x2e, 0x89, 0x54, 0x32, 0xeb, 0x9b, 0x0c, 0xcd, 0x7f, 0x1c, 0xde, 0x87, + 0x46, 0x18, 0x44, 0x97, 0x3c, 0x08, 0x4b, 0x17, 0x15, 0x52, 0x0f, 0x83, 0x68, 0x14, 0x84, 0x4c, + 0x50, 0x74, 0x5a, 0x52, 0x72, 0x45, 0xd1, 0xa9, 0xa0, 0x5a, 0xa0, 0xa4, 0xf4, 0xb3, 0xb0, 0xed, + 0xbf, 0xb1, 0x44, 0x45, 0x52, 0x30, 0xf8, 0x31, 0xd4, 0xdc, 0x38, 0x8f, 0xb8, 0xa1, 0x6e, 0x93, + 0x94, 0x5c, 0x51, 0x25, 0xcb, 0x43, 0xa3, 0xb6, 0xb5, 0x4a, 0x96, 0x87, 0x85, 0x20, 0x0c, 0x22, + 0x43, 0xdb, 0x2a, 0x08, 0x83, 0x48, 0x08, 0xe8, 0xd4, 0xa8, 0x6f, 0x17, 0xd0, 0x29, 0x7e, 0x0a, + 0x75, 0xd1, 0x8b, 0xa5, 0x46, 0x63, 0x9b, 0x68, 0xcd, 0x5a, 0x5f, 0x11, 0xec, 0x08, 0x63, 0x4f, + 
0x29, 0x77, 0x7d, 0x96, 0xe2, 0xa3, 0x8d, 0xd5, 0xda, 0xdf, 0xb8, 0xba, 0x4a, 0x63, 0x8f, 0x66, + 0x09, 0xbb, 0xdd, 0xae, 0x88, 0x56, 0x46, 0x35, 0x89, 0x88, 0xf1, 0x1e, 0xd4, 0x3e, 0xd1, 0x49, + 0xce, 0x84, 0x4f, 0x4d, 0x52, 0xfe, 0x58, 0x1d, 0x50, 0x8b, 0x3c, 0xac, 0x81, 0xdc, 0x3f, 0xd7, + 0xa5, 0x62, 0xbb, 0xce, 0xfa, 0xe7, 0x3a, 0x2a, 0x00, 0xd2, 0xd7, 0x65, 0x01, 0x90, 0xbe, 0xae, + 0x1c, 0xda, 0xf0, 0xe8, 0x3d, 0x4d, 0x79, 0x40, 0x27, 0x84, 0x65, 0x49, 0x1c, 0x65, 0x6c, 0xc8, + 0x53, 0xca, 0x99, 0x37, 0xc3, 0x0d, 0x50, 0x3f, 0x74, 0xc9, 0x99, 0x2e, 0xe1, 0x26, 0xd4, 0xba, + 0xbd, 0x01, 0x19, 0xe9, 0xa8, 0xf7, 0x64, 0xfe, 0xc7, 0x94, 0xe6, 0x4b, 0x13, 0xdd, 0x2c, 0x4d, + 0xf4, 0x7b, 0x69, 0xa2, 0x2f, 0x2b, 0x53, 0xba, 0x59, 0x99, 0xd2, 0xcf, 0x95, 0x29, 0x5d, 0xd4, + 0xab, 0x37, 0x38, 0xd6, 0xc4, 0x2b, 0x7a, 0xf6, 0x37, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x6d, 0x25, + 0xf3, 0x9b, 0x03, 0x00, 0x00, } func (m *Chunk) Marshal() (dAtA []byte, err error) { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto index 67c93fa52ed..840d3c51881 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto @@ -24,6 +24,7 @@ message Chunk { enum Encoding { XOR = 0; HISTOGRAM = 1; + FLOAT_HISTOGRAM = 2; } Encoding type = 1; bytes data = 2; diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index b3e8783cc59..2cf71f0f93f 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -8,7 +8,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -142,10 +141,8 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // Second, try a well-known file. filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, params); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + if b, err := os.ReadFile(filename); err == nil { + return CredentialsFromJSONWithParams(ctx, b, params) } // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) @@ -231,7 +228,7 @@ func wellKnownFile() string { } func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { - b, err := ioutil.ReadFile(filename) + b, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index c0ab196cf46..14989beaf49 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -14,7 +14,7 @@ import ( // ParseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key +// PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. 
func ParseKey(key []byte) (*rsa.PrivateKey, error) { diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index b4723fcacea..58901bda53e 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -55,12 +55,18 @@ type Token struct { } // tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. +// providers returning a token or error in JSON form. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 type tokenJSON struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` } func (e *tokenJSON) expiry() (t time.Time) { @@ -236,21 +242,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + retrieveError := &RetrieveError{ + Response: r, + Body: body, + // attempt to populate error detail below } var token *Token content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string vals, err := url.ParseQuery(string(body)) if err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") token = &Token{ AccessToken: vals.Get("access_token"), TokenType: vals.Get("token_type"), @@ -265,8 +279,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { default: var tj tokenJSON if err = json.Unmarshal(body, &tj); err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI token = &Token{ AccessToken: tj.AccessToken, TokenType: tj.TokenType, @@ -276,17 +296,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || retrieveError.ErrorCode != "" { + return nil, retrieveError + } if token.AccessToken == "" { return nil, errors.New("oauth2: server response missing access_token") } return token, nil } +// mirrors oauth2.RetrieveError type RetrieveError struct { - Response *http.Response - Body []byte + Response *http.Response + Body []byte + ErrorCode string + ErrorDescription string + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { 
+ s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 7c64006de69..5ffce9764be 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -175,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) } // RetrieveError is the error returned when the token endpoint returns a -// non-2XX HTTP status code. +// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 type RetrieveError struct { Response *http.Response // Body is the body that was consumed by reading Response.Body. // It may be truncated. Body []byte + // ErrorCode is RFC 6749's 'error' parameter. + ErrorCode string + // ErrorDescription is RFC 6749's 'error_description' parameter. + ErrorDescription string + // ErrorURI is RFC 6749's 'error_uri' parameter. + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/modules.txt b/vendor/modules.txt index f32adcbe7a0..b9be5793302 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -712,10 +712,10 @@ github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/push github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint -# github.com/prometheus/client_model v0.3.0 -## explicit; go 1.9 +# github.com/prometheus/client_model v0.4.0 +## explicit; go 1.18 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.42.0 +# github.com/prometheus/common v0.44.0 ## explicit; go 1.18 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -785,7 +785,7 @@ github.com/prometheus/prometheus/util/teststorage github.com/prometheus/prometheus/util/testutil github.com/prometheus/prometheus/util/zeropool github.com/prometheus/prometheus/web/api/v1 -# github.com/redis/rueidis v1.0.2-go1.18 +# github.com/redis/rueidis v1.0.10-go1.18 ## explicit; go 1.18 github.com/redis/rueidis github.com/redis/rueidis/internal/cmds @@ -863,7 +863,7 @@ github.com/thanos-io/promql-engine/logicalplan github.com/thanos-io/promql-engine/parser github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/worker -# github.com/thanos-io/thanos v0.31.1-0.20230627154113-7cfaf3fe2d43 +# github.com/thanos-io/thanos v0.31.1-0.20230711160112-df3a5f808726 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -1143,7 +1143,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.7.0 +# golang.org/x/oauth2 v0.8.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler
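
Usage note on the bumped `golang.org/x/oauth2` (v0.8.0 above): `RetrieveError` now surfaces the RFC 6749 `error`, `error_description`, and `error_uri` fields, and is returned even when a non-conforming server responds 200 with an error body. A minimal, hypothetical sketch of how a caller can inspect those fields; the endpoint and credentials below are made up for illustration:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	// Hypothetical client configuration, purely for illustration.
	conf := &oauth2.Config{
		ClientID:     "client-id",
		ClientSecret: "client-secret",
		Endpoint: oauth2.Endpoint{
			TokenURL: "https://auth.example.com/token",
		},
	}

	_, err := conf.Exchange(context.Background(), "bad-code")
	var rErr *oauth2.RetrieveError
	if errors.As(err, &rErr) {
		// Populated from the JSON or form-encoded error body when present.
		fmt.Println("error:", rErr.ErrorCode)
		fmt.Println("description:", rErr.ErrorDescription)
		fmt.Println("uri:", rErr.ErrorURI)
	} else if err != nil {
		fmt.Println("other error:", err)
	}
}
```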