diff --git a/CHANGELOG.md b/CHANGELOG.md index da41e6ebbec..e80e8e03875 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ * [ENHANCEMENT] Compactor: concurrently run blocks cleaner for multiple tenants. Concurrency can be configured via `-compactor.cleanup-concurrency`. #3483 * [ENHANCEMENT] Compactor: shuffle tenants before running compaction. #3483 * [ENHANCEMENT] Compactor: wait for a stable ring at startup, when sharding is enabled. #3484 +* [ENHANCEMENT] Store-gateway: added `-blocks-storage.bucket-store.index-header-lazy-loading-enabled` to enable index-header lazy loading (experimental). When enabled, index-headers will be mmap-ed only once required by a query and will be automatically released after `-blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout` time of inactivity. #3498 * [BUGFIX] Blocks storage ingester: fixed some cases leading to a TSDB WAL corruption after a partial write to disk. #3423 * [BUGFIX] Blocks storage: Fix the race between ingestion and `/flush` call resulting in overlapping blocks. #3422 * [BUGFIX] Querier: fixed `-querier.max-query-into-future` which wasn't correctly enforced on range queries. #3452 diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index a0c77402b12..5a5484c9bef 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -57,3 +57,4 @@ Currently experimental features are: - Metric relabeling in the distributor. - Scalable query-frontend (when using query-scheduler) - Querying store for series, labels APIs (`-querier.query-store-for-labels-enabled`) +- Blocks storage: lazy mmap of block indexes in the store-gateway (`-blocks-storage.bucket-store.index-header-lazy-loading-enabled`) diff --git a/go.mod b/go.mod index fae75e0aafd..59553686608 100644 --- a/go.mod +++ b/go.mod @@ -35,6 +35,7 @@ require ( github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/memberlist v0.2.2 github.com/json-iterator/go v1.1.10 + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lib/pq v1.3.0 github.com/mitchellh/go-wordwrap v1.0.0 github.com/ncw/swift v1.0.50 @@ -52,7 +53,7 @@ require ( github.com/sony/gobreaker v0.4.1 github.com/spf13/afero v1.2.2 github.com/stretchr/testify v1.6.1 - github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52 + github.com/thanos-io/thanos v0.13.1-0.20201112171553-05fbe15616c7 github.com/uber/jaeger-client-go v2.25.0+incompatible github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099 go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50 @@ -82,6 +83,9 @@ replace github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915 // We can't upgrade to grpc 1.30.0 until go.etcd.io/etcd will support it. replace google.golang.org/grpc => google.golang.org/grpc v1.29.1 +// We can't upgrade until grpc upgrade is unblocked. 
+replace github.com/sercand/kuberesolver => github.com/sercand/kuberesolver v2.4.0+incompatible + // Using a 3rd-party branch for custom dialer - see https://github.com/bradfitz/gomemcache/pull/86 replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab diff --git a/go.sum b/go.sum index 2d4508d4d47..51281eb5c1f 100644 --- a/go.sum +++ b/go.sum @@ -54,7 +54,6 @@ github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v44.2.0+incompatible h1:d0WY8HTXhnurVBAkLXzv4bRpd+P5r3U/W17Z88PJWiI= github.com/Azure/azure-sdk-for-go v44.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v46.4.0+incompatible h1:fCN6Pi+tEiEwFa8RSmtVlFHRXEZ+DJm9gfx/MKqYWw4= @@ -70,7 +69,6 @@ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.2 h1:BR5GoSGobeiMwGOOIxXuvNKNPy+HMGdteKB8kJUDnBE= github.com/Azure/go-autorest/autorest v0.11.2/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.10 h1:j5sGbX7uj1ieYYkQ3Mpvewd4DCsEQ+ZeJpqnSM9pjnM= @@ -79,7 +77,6 @@ github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEg github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= @@ -91,13 +88,14 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= 
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503 h1:2McfZNaDqGPjv2pddK547PENIk4HV+NT7gvqRq4L0us= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503 h1:RBrGlrkPWapMcLp1M6ywCqyYKOAT5ERI6lYFvGKOThE= github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= @@ -125,6 +123,7 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= @@ -183,7 +182,6 @@ github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh4= github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.35.5 h1:doSEOxC0UkirPcle20Rc+1kAhJ4Ip+GSEeZ3nKl7Qlk= @@ -240,6 +238,7 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -247,6 +246,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf h1:CAKfRE2YtTUIjjh1bkBtyYFaUT/WmOqsJjgtihT0vMI= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -254,6 +254,7 @@ github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3 github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:PVPxNLrxKH+yc8asaJOxuz7TiRmMizFfnSMOnRzM6oM= github.com/cortexproject/cortex v1.3.1-0.20200923145333-8587ea61fe17/go.mod h1:dJ9gpW7dzQ7z09cKtNN9PfebumgyO4dtNdFQ6eQEed0= github.com/cortexproject/cortex v1.4.1-0.20201030080541-83ad6df2abea/go.mod h1:kXo5F3jlF7Ky3+I31jt/bXTzOlQjl2X/vGDpy0RY1gU= +github.com/cortexproject/cortex v1.5.1-0.20201111110551-ba512881b076/go.mod h1:zFBGVsvRBfVp6ARXZ7pmiLaGlbjda5ZnA4Y6qSJyrQg= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= @@ -283,7 +284,6 @@ github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= github.com/digitalocean/godo v1.37.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/digitalocean/godo v1.38.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= -github.com/digitalocean/godo v1.42.0 h1:xQlEFLhQ1zZUryJAfiWb8meLPPCWnLO901U5Imhh0Mc= github.com/digitalocean/godo v1.42.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/digitalocean/godo v1.46.0 h1:WRbwjATilgz2NE4NGMeSDpeicy9h4xSKNGuRJ/Nq/fA= @@ -324,7 +324,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -341,7 +340,6 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod 
h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= @@ -599,17 +597,16 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/googleapis/gnostic v0.4.0 h1:BXDUo8p/DaxC+4FJY/SSx3gvnx9C1VdHNgaUkiEL5mk= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gophercloud/gophercloud v0.11.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.12.0 h1:mZrie07npp6ODiwHZolTicr5jV8Ogn43AvAsSMm6Ork= github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.13.0 h1:1XkslZZRm6Ks0bLup+hBNth+KQf+0JA1UeoB7YKw9E8= github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -618,6 +615,7 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c h1:Lh2aW+HnU2Nbe1gqD9SOJLJxW1jBMmQOktN2acDyJk8= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -633,7 +631,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.14.6 
h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.15.0 h1:ntPNC9TD/6l2XDenJZe6T5lSMg95thpV9sGAqHX4WU8= github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8= @@ -643,7 +640,6 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMW github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= -github.com/hashicorp/consul/api v1.5.0 h1:Yo2bneoGy68A7aNwmuETFnPhjyBEm7n3vzRacEVMjvI= github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038txr/IMfbLPATa4= github.com/hashicorp/consul/api v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= github.com/hashicorp/consul/api v1.7.0 h1:tGs8Oep67r8CcA2Ycmb/8BLBcJ70St44mF2X10a/qPg= @@ -651,7 +647,6 @@ github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.5.0 h1:WC4594Wp/LkEeML/OdQKEC1yqBmEYkRp6i7X5u0zDAs= github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.6.0 h1:FfhMEkwvQl57CildXJyGHnwGGM4HMODGyfjGwNM1Vdw= github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= @@ -667,6 +662,7 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -705,7 +701,6 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= -github.com/hashicorp/serf v0.9.0 h1:+Zd/16AJ9lxk9RzfTDyv/TLhZ8UerqYS0/+JGCIDaa0= github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM= github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= @@ -741,7 +736,6 @@ github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGAR github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath 
v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -785,6 +779,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= @@ -811,8 +806,9 @@ github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LE github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939 h1:yZJImkCmVI6d1uJ9KRRf/96YbFLDQ/hhs6Xt9Z3OBXI= github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leanovate/gopter v0.2.4 h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU= @@ -840,6 +836,7 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= @@ -873,7 +870,6 @@ github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod 
h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo= github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= @@ -1052,7 +1048,6 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.8.0/go.mod h1:PC/OgXc+UN7B4ALwvn1yzVZmVwvhXp5JsbBv6wSv6i0= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.11.1 h1:0ZISXCMRuCZcxF77aT1BXY5m74mX2vrGYl1dSwBI0Jo= github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.12.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4= @@ -1079,13 +1074,11 @@ github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1: github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c/go.mod h1:/kMSPIRsxr/apyHxlzYMdFnaPXUXXqILU5uzIoNhOvc= github.com/prometheus/prometheus v1.8.2-0.20200722151933-4a8531a64b32/go.mod h1:+/y4DzJ62qmhy0o/H4PtXegRXw+80E8RVRHhLbv+bkM= github.com/prometheus/prometheus v1.8.2-0.20200805082714-e0cf219f0de2/go.mod h1:i1KZsZmyDTJRvnR7zE8z/u2v+tkpPjoiPpnWp6nwhr0= -github.com/prometheus/prometheus v1.8.2-0.20200819132913-cb830b0a9c78 h1:tHIAD+hgCIb86T0/Du7vGyfHa6J1+XsImQoY8Ete+c8= github.com/prometheus/prometheus v1.8.2-0.20200819132913-cb830b0a9c78/go.mod h1:zfAqy/MwhMFajB9E2n12/9gG2fvofIE9uKDtlZCDxqs= github.com/prometheus/prometheus v1.8.2-0.20200923143134-7e2db3d092f3/go.mod h1:9VNWoDFHOMovlubld5uKKxfCDcPBj2GMOCjcUFXkYaM= github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ= github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9 h1:T6pkPNGKXv21lLfgD/mnIABj9aOhmz8HphDmKllfKWs= github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ= -github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= @@ -1113,7 +1106,6 @@ github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszj github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sercand/kuberesolver 
v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -1124,7 +1116,6 @@ github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c h1:XLPw6rny9Vrrvrzhw8pNLrC2+x/kH0a/3gOx5xWDa6Y= github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -1137,6 +1128,7 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= @@ -1148,6 +1140,7 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -1178,18 +1171,19 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf/go.mod h1:G8caR6G7pSDreRDvFm9wFuyjEBztmr8Ag3kBYpa/fEc= -github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763 h1:c84P3YUu8bxLWE2csCSK4XJNi5FxcC+HL4WDNDEbTwA= github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763/go.mod h1:KyW0a93tsh7v4hXAwo2CVAIRYuZT1Kkf4e04gisQjAg= -github.com/thanos-io/thanos v0.13.1-0.20201019130456-f41940581d9a h1:4rNkFHeY+EIR7UdiYn5fZE7Q35Y3Dmae8q1Qbb90tcY= github.com/thanos-io/thanos v0.13.1-0.20201019130456-f41940581d9a/go.mod 
h1:A3qUEEbsVkplJnxyDLwuIuvTDaJPByTH+hMdTl9ujAA= github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52 h1:z3hglXVwJ4HgU0OoDS+8+MvEipv/U83IQ+fMsDr00YQ= github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52/go.mod h1:OqqX4x21cg5N5MMHd/yGQAc/V3wg8a7Do4Jk8HfaFZQ= +github.com/thanos-io/thanos v0.13.1-0.20201112171553-05fbe15616c7 h1:KOXwbcE+UCd5iJH9/DMIFXbm1s5YzjB5SvhnnRLq2xA= +github.com/thanos-io/thanos v0.13.1-0.20201112171553-05fbe15616c7/go.mod h1:4UaILQWp/n04Gn9nKK0DO5HA7UEwVrjNX2Zpq7DEWPE= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1201,7 +1195,6 @@ github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ= github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= @@ -1264,17 +1257,16 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= -go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -1282,6 +1274,7 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEa go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= @@ -1308,7 +1301,6 @@ golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1498,7 +1490,6 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1587,7 +1578,6 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200603131246-cc40288be839/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200725200936-102e7d357031 h1:VtIxiVHWPhnny2ZTi4f9/2diZKqyLaq3FUTuud5+khA= golang.org/x/tools v0.0.0-20200725200936-102e7d357031/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1674,11 +1664,9 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 h1:AWgNCmk2V5HZp9AiCDRBExX/b9I0Ey9F8STHDZlhCC4= google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 h1:wboULUXGF3c5qdUnKp+6gLAccE6PRpa/czkYvQ4UXv8= google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= @@ -1754,7 +1742,6 @@ k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk= -k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE= k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= @@ -1763,7 +1750,6 @@ k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXC k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= @@ -1786,13 +1772,11 @@ k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi 
v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200414100711-2df71ebbae66 h1:Ly1Oxdu5p5ZFmiVT71LFgeZETvMfZ1iBIGeOenT2JeM= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1804,7 +1788,6 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go index 215f8dfe346..a9bfa85b38c 100644 --- a/pkg/compactor/blocks_cleaner.go +++ b/pkg/compactor/blocks_cleaner.go @@ -186,8 +186,8 @@ func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map } // We can safely delete only partial blocks with a deletion mark. - _, err := metadata.ReadDeletionMark(ctx, userBucket, userLogger, blockID.String()) - if err == metadata.ErrorDeletionMarkNotFound { + err := metadata.ReadMarker(ctx, userLogger, userBucket, blockID.String(), &metadata.DeletionMark{}) + if err == metadata.ErrorMarkerNotFound { continue } if err != nil { diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index e566fe7f943..71c9ef137e0 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -99,9 +99,9 @@ type Compactor struct { // If empty, no users are disabled. If not empty, users in the map are disabled (not owned by this compactor). disabledUsers map[string]struct{} - // Function that creates bucket client and TSDB compactor using the context. + // Function that creates bucket client, TSDB planner and compactor using the context. // Useful for injecting mock objects from tests. - createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) + createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) // Users scanner, used to discover users from the bucket. usersScanner *UsersScanner @@ -109,8 +109,9 @@ type Compactor struct { // Blocks cleaner is responsible to hard delete blocks marked for deletion. blocksCleaner *BlocksCleaner - // Underlying compactor used to compact TSDB blocks. + // Underlying compactor and planner used to compact TSDB blocks. 
tsdbCompactor tsdb.Compactor + tsdbPlanner compact.Planner // Client used to run operations on the bucket storing blocks. bucketClient objstore.Bucket @@ -135,17 +136,22 @@ type Compactor struct { // NewCompactor makes a new Compactor. func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) { - createBucketClientAndTsdbCompactor := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) { + createDependencies := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) { bucketClient, err := cortex_tsdb.NewBucketClient(ctx, storageCfg.Bucket, "compactor", logger, registerer) if err != nil { - return nil, nil, errors.Wrap(err, "failed to create the bucket client") + return nil, nil, nil, errors.Wrap(err, "failed to create the bucket client") } compactor, err := tsdb.NewLeveledCompactor(ctx, registerer, logger, compactorCfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) - return bucketClient, compactor, err + if err != nil { + return nil, nil, nil, err + } + + planner := compact.NewTSDBBasedPlanner(logger, compactorCfg.BlockRanges.ToMilliseconds()) + return bucketClient, compactor, planner, nil } - cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createBucketClientAndTsdbCompactor) + cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createDependencies) if err != nil { return nil, errors.Wrap(err, "failed to create Cortex blocks compactor") } @@ -158,16 +164,16 @@ func newCompactor( storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer, - createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error), + createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error), ) (*Compactor, error) { c := &Compactor{ - compactorCfg: compactorCfg, - storageCfg: storageCfg, - parentLogger: logger, - logger: log.With(logger, "component", "compactor"), - registerer: registerer, - syncerMetrics: newSyncerMetrics(registerer), - createBucketClientAndTsdbCompactor: createBucketClientAndTsdbCompactor, + compactorCfg: compactorCfg, + storageCfg: storageCfg, + parentLogger: logger, + logger: log.With(logger, "component", "compactor"), + registerer: registerer, + syncerMetrics: newSyncerMetrics(registerer), + createDependencies: createDependencies, compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_compactor_runs_started_total", @@ -223,7 +229,7 @@ func (c *Compactor) starting(ctx context.Context) error { var err error // Create bucket client and compactor. 
- c.bucketClient, c.tsdbCompactor, err = c.createBucketClientAndTsdbCompactor(ctx) + c.bucketClient, c.tsdbCompactor, c.tsdbPlanner, err = c.createDependencies(ctx) if err != nil { return errors.Wrap(err, "failed to initialize compactor objects") } @@ -472,6 +478,7 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { ulogger, syncer, grouper, + c.tsdbPlanner, c.tsdbCompactor, path.Join(c.compactorCfg.DataDir, "compact"), bucket, diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 66584dda5c9..b0c0b1157ec 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/objstore" "gopkg.in/yaml.v2" @@ -86,7 +87,7 @@ func TestCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { bucketClient := &cortex_tsdb.BucketClientMock{} bucketClient.MockIter("", []string{}, nil) - c, _, logs, registry, cleanup := prepare(t, prepareConfig(), bucketClient) + c, _, _, logs, registry, cleanup := prepare(t, prepareConfig(), bucketClient) defer cleanup() require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -229,7 +230,7 @@ func TestCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersFromBucket bucketClient := &cortex_tsdb.BucketClientMock{} bucketClient.MockIter("", nil, errors.New("failed to iterate the bucket")) - c, _, logs, registry, cleanup := prepare(t, prepareConfig(), bucketClient) + c, _, _, logs, registry, cleanup := prepare(t, prepareConfig(), bucketClient) defer cleanup() require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -385,14 +386,14 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", mockBlockMetaJSON("01DTW0ZCPDDNV4BV83Q2SV4QAZ"), nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", "", nil) - c, tsdbCompactor, logs, registry, cleanup := prepare(t, prepareConfig(), bucketClient) + c, _, tsdbPlanner, logs, registry, cleanup := prepare(t, prepareConfig(), bucketClient) defer cleanup() - // Mock the compactor as if there's no compaction to do, + // Mock the planner as if there's no compaction to do, // in order to simplify tests (all in all, we just want to // test our logic and not TSDB compactor which we expect to // be already tested). - tsdbCompactor.On("Plan", mock.Anything).Return([]string{}, nil) + tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -404,7 +405,7 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c)) // Ensure a plan has been executed for the blocks of each user. 
- tsdbCompactor.AssertNumberOfCalls(t, "Plan", 2) + tsdbPlanner.AssertNumberOfCalls(t, "Plan", 2) assert.ElementsMatch(t, []string{ `level=info component=cleaner msg="started hard deletion of blocks marked for deletion"`, @@ -496,14 +497,14 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { bucketClient.MockDelete("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", nil) bucketClient.MockDelete("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ", nil) - c, tsdbCompactor, logs, registry, cleanup := prepare(t, cfg, bucketClient) + c, _, tsdbPlanner, logs, registry, cleanup := prepare(t, cfg, bucketClient) defer cleanup() - // Mock the compactor as if there's no compaction to do, + // Mock the planner as if there's no compaction to do, // in order to simplify tests (all in all, we just want to // test our logic and not TSDB compactor which we expect to // be already tested). - tsdbCompactor.On("Plan", mock.Anything).Return([]string{}, nil) + tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -515,7 +516,7 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c)) // Only one user's block is compacted. - tsdbCompactor.AssertNumberOfCalls(t, "Plan", 1) + tsdbPlanner.AssertNumberOfCalls(t, "Plan", 1) assert.ElementsMatch(t, []string{ `level=info component=cleaner msg="started hard deletion of blocks marked for deletion"`, @@ -600,14 +601,14 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni cfg.ShardingRing.InstanceAddr = "1.2.3.4" cfg.ShardingRing.KVStore.Mock = consul.NewInMemoryClient(ring.GetCodec()) - c, tsdbCompactor, logs, _, cleanup := prepare(t, cfg, bucketClient) + c, _, tsdbPlanner, logs, _, cleanup := prepare(t, cfg, bucketClient) defer cleanup() - // Mock the compactor as if there's no compaction to do, + // Mock the planner as if there's no compaction to do, // in order to simplify tests (all in all, we just want to // test our logic and not TSDB compactor which we expect to // be already tested). - tsdbCompactor.On("Plan", mock.Anything).Return([]string{}, nil) + tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -619,7 +620,7 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c)) // Ensure a plan has been executed for the blocks of each user. 
- tsdbCompactor.AssertNumberOfCalls(t, "Plan", 2) + tsdbPlanner.AssertNumberOfCalls(t, "Plan", 2) assert.ElementsMatch(t, []string{ `level=info component=compactor msg="waiting until compactor is ACTIVE in the ring"`, @@ -683,18 +684,18 @@ func TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM cfg.ShardingRing.WaitStabilityMaxDuration = 10 * time.Second cfg.ShardingRing.KVStore.Mock = kvstore - c, tsdbCompactor, l, _, cleanup := prepare(t, cfg, bucketClient) + c, _, tsdbPlanner, l, _, cleanup := prepare(t, cfg, bucketClient) defer services.StopAndAwaitTerminated(context.Background(), c) //nolint:errcheck defer cleanup() compactors = append(compactors, c) logs = append(logs, l) - // Mock the compactor as if there's no compaction to do, + // Mock the planner as if there's no compaction to do, // in order to simplify tests (all in all, we just want to // test our logic and not TSDB compactor which we expect to // be already tested). - tsdbCompactor.On("Plan", mock.Anything).Return([]string{}, nil) + tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) } // Start all compactors @@ -844,7 +845,7 @@ func prepareConfig() Config { return compactorCfg } -func prepare(t *testing.T, compactorCfg Config, bucketClient objstore.Bucket) (*Compactor, *tsdbCompactorMock, *concurrency.SyncBuffer, prometheus.Gatherer, func()) { +func prepare(t *testing.T, compactorCfg Config, bucketClient objstore.Bucket) (*Compactor, *tsdbCompactorMock, *tsdbPlannerMock, *concurrency.SyncBuffer, prometheus.Gatherer, func()) { storageCfg := cortex_tsdb.BlocksStorageConfig{} flagext.DefaultValues(&storageCfg) @@ -858,16 +859,17 @@ func prepare(t *testing.T, compactorCfg Config, bucketClient objstore.Bucket) (* } tsdbCompactor := &tsdbCompactorMock{} + tsdbPlanner := &tsdbPlannerMock{} logs := &concurrency.SyncBuffer{} logger := log.NewLogfmtLogger(logs) registry := prometheus.NewRegistry() - c, err := newCompactor(compactorCfg, storageCfg, logger, registry, func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) { - return bucketClient, tsdbCompactor, nil + c, err := newCompactor(compactorCfg, storageCfg, logger, registry, func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) { + return bucketClient, tsdbCompactor, tsdbPlanner, nil }) require.NoError(t, err) - return c, tsdbCompactor, logs, registry, cleanup + return c, tsdbCompactor, tsdbPlanner, logs, registry, cleanup } type tsdbCompactorMock struct { @@ -889,6 +891,15 @@ func (m *tsdbCompactorMock) Compact(dest string, dirs []string, open []*tsdb.Blo return args.Get(0).(ulid.ULID), args.Error(1) } +type tsdbPlannerMock struct { + mock.Mock +} + +func (m *tsdbPlannerMock) Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + args := m.Called(ctx, metasByMinTime) + return args.Get(0).([]*metadata.Meta), args.Error(1) +} + func mockBlockMetaJSON(id string) string { meta := tsdb.BlockMeta{ Version: 1, diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index 06468c7c302..05d189d414d 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -262,6 +262,11 @@ type BucketStoreConfig struct { MetadataCache MetadataCacheConfig `yaml:"metadata_cache"` IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` + // Controls whether index-header lazy loading is enabled. This config option is hidden + // while it is marked as experimental. 
+ IndexHeaderLazyLoadingEnabled bool `yaml:"index_header_lazy_loading_enabled" doc:"hidden"` + IndexHeaderLazyLoadingIdleTimeout time.Duration `yaml:"index_header_lazy_loading_idle_timeout" doc:"hidden"` + // Controls what is the ratio of postings offsets store will hold in memory. // Larger value will keep less offsets, which will increase CPU cycles needed for query touching those postings. // It's meant for setups that want low baseline memory pressure and where less traffic is expected. @@ -288,6 +293,8 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+ "Default is 6h, half of the default value for -compactor.deletion-delay.") f.IntVar(&cfg.PostingOffsetsInMemSampling, "blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") + f.BoolVar(&cfg.IndexHeaderLazyLoadingEnabled, "blocks-storage.bucket-store.index-header-lazy-loading-enabled", false, "If enabled, store-gateway will lazy load an index-header only once required by a query.") + f.DurationVar(&cfg.IndexHeaderLazyLoadingIdleTimeout, "blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout", 20*time.Minute, "If index-header lazy loading is enabled and this setting is > 0, the store-gateway will offload unused index-headers after 'idle timeout' inactivity.") } // Validate the config. diff --git a/pkg/storegateway/bucket_store_metrics.go b/pkg/storegateway/bucket_store_metrics.go index 35cbd9992c8..9cd96e13d45 100644 --- a/pkg/storegateway/bucket_store_metrics.go +++ b/pkg/storegateway/bucket_store_metrics.go @@ -40,6 +40,12 @@ type BucketStoreMetrics struct { seriesFetchDuration *prometheus.Desc postingsFetchDuration *prometheus.Desc + + indexHeaderLazyLoadCount *prometheus.Desc + indexHeaderLazyLoadFailedCount *prometheus.Desc + indexHeaderLazyUnloadCount *prometheus.Desc + indexHeaderLazyUnloadFailedCount *prometheus.Desc + indexHeaderLazyLoadDuration *prometheus.Desc } func NewBucketStoreMetrics() *BucketStoreMetrics { @@ -137,6 +143,27 @@ func NewBucketStoreMetrics() *BucketStoreMetrics { "cortex_bucket_store_cached_postings_fetch_duration_seconds", "Time it takes to fetch postings to respond a request sent to store-gateway. 
It includes both the time to fetch it from cache and from storage in case of cache misses.", nil, nil), + + indexHeaderLazyLoadCount: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_load_total", + "Total number of index-header lazy load operations.", + nil, nil), + indexHeaderLazyLoadFailedCount: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_load_failed_total", + "Total number of failed index-header lazy load operations.", + nil, nil), + indexHeaderLazyUnloadCount: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_unload_total", + "Total number of index-header lazy unload operations.", + nil, nil), + indexHeaderLazyUnloadFailedCount: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_unload_failed_total", + "Total number of failed index-header lazy unload operations.", + nil, nil), + indexHeaderLazyLoadDuration: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_load_duration_seconds", + "Duration of the index-header lazy loading in seconds.", + nil, nil), } } @@ -183,6 +210,12 @@ func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.seriesFetchDuration out <- m.postingsFetchDuration + + out <- m.indexHeaderLazyLoadCount + out <- m.indexHeaderLazyLoadFailedCount + out <- m.indexHeaderLazyUnloadCount + out <- m.indexHeaderLazyUnloadFailedCount + out <- m.indexHeaderLazyLoadDuration } func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { @@ -215,4 +248,10 @@ func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfHistograms(out, m.seriesFetchDuration, "thanos_bucket_store_cached_series_fetch_duration_seconds") data.SendSumOfHistograms(out, m.postingsFetchDuration, "thanos_bucket_store_cached_postings_fetch_duration_seconds") + + data.SendSumOfCounters(out, m.indexHeaderLazyLoadCount, "thanos_bucket_store_indexheader_lazy_load_total") + data.SendSumOfCounters(out, m.indexHeaderLazyLoadFailedCount, "thanos_bucket_store_indexheader_lazy_load_failed_total") + data.SendSumOfCounters(out, m.indexHeaderLazyUnloadCount, "thanos_bucket_store_indexheader_lazy_unload_total") + data.SendSumOfCounters(out, m.indexHeaderLazyUnloadFailedCount, "thanos_bucket_store_indexheader_lazy_unload_failed_total") + data.SendSumOfHistograms(out, m.indexHeaderLazyLoadDuration, "thanos_bucket_store_indexheader_lazy_load_duration_seconds") } diff --git a/pkg/storegateway/bucket_store_metrics_test.go b/pkg/storegateway/bucket_store_metrics_test.go index 3aa3a031a7a..85fc2610554 100644 --- a/pkg/storegateway/bucket_store_metrics_test.go +++ b/pkg/storegateway/bucket_store_metrics_test.go @@ -199,6 +199,37 @@ func TestBucketStoreMetrics(t *testing.T) { cortex_bucket_store_cached_postings_fetch_duration_seconds_bucket{le="+Inf"} 3 cortex_bucket_store_cached_postings_fetch_duration_seconds_sum 1.328621e+06 cortex_bucket_store_cached_postings_fetch_duration_seconds_count 3 + + # HELP cortex_bucket_store_indexheader_lazy_load_duration_seconds Duration of the index-header lazy loading in seconds. 
+ # TYPE cortex_bucket_store_indexheader_lazy_load_duration_seconds histogram + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="0.01"} 0 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="0.02"} 0 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="0.05"} 0 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="0.1"} 0 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="0.2"} 0 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="0.5"} 0 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="1"} 3 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="2"} 3 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="5"} 3 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_bucket{le="+Inf"} 3 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_sum 1.9500000000000002 + cortex_bucket_store_indexheader_lazy_load_duration_seconds_count 3 + + # HELP cortex_bucket_store_indexheader_lazy_load_failed_total Total number of failed index-header lazy load operations. + # TYPE cortex_bucket_store_indexheader_lazy_load_failed_total counter + cortex_bucket_store_indexheader_lazy_load_failed_total 1.373659e+06 + + # HELP cortex_bucket_store_indexheader_lazy_load_total Total number of index-header lazy load operations. + # TYPE cortex_bucket_store_indexheader_lazy_load_total counter + cortex_bucket_store_indexheader_lazy_load_total 1.35114e+06 + + # HELP cortex_bucket_store_indexheader_lazy_unload_failed_total Total number of failed index-header lazy unload operations. + # TYPE cortex_bucket_store_indexheader_lazy_unload_failed_total counter + cortex_bucket_store_indexheader_lazy_unload_failed_total 1.418697e+06 + + # HELP cortex_bucket_store_indexheader_lazy_unload_total Total number of index-header lazy unload operations. 
+ # TYPE cortex_bucket_store_indexheader_lazy_unload_total counter + cortex_bucket_store_indexheader_lazy_unload_total 1.396178e+06 `)) require.NoError(t, err) } @@ -298,6 +329,12 @@ func populateMockedBucketStoreMetrics(base float64) *prometheus.Registry { m.seriesFetchDuration.Observe(58 * base) m.postingsFetchDuration.Observe(59 * base) + m.indexHeaderLazyLoadCount.Add(60 * base) + m.indexHeaderLazyLoadFailedCount.Add(61 * base) + m.indexHeaderLazyUnloadCount.Add(62 * base) + m.indexHeaderLazyUnloadFailedCount.Add(63 * base) + m.indexHeaderLazyLoadDuration.Observe(0.65) + return reg } @@ -328,6 +365,12 @@ type mockedBucketStoreMetrics struct { seriesFetchDuration prometheus.Histogram postingsFetchDuration prometheus.Histogram + + indexHeaderLazyLoadCount prometheus.Counter + indexHeaderLazyLoadFailedCount prometheus.Counter + indexHeaderLazyUnloadCount prometheus.Counter + indexHeaderLazyUnloadFailedCount prometheus.Counter + indexHeaderLazyLoadDuration prometheus.Histogram } func newMockedBucketStoreMetrics(reg prometheus.Registerer) *mockedBucketStoreMetrics { @@ -440,5 +483,27 @@ func newMockedBucketStoreMetrics(reg prometheus.Registerer) *mockedBucketStoreMe Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) + m.indexHeaderLazyLoadCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "thanos_bucket_store_indexheader_lazy_load_total", + Help: "Total number of index-header lazy load operations.", + }) + m.indexHeaderLazyLoadFailedCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "thanos_bucket_store_indexheader_lazy_load_failed_total", + Help: "Total number of failed index-header lazy load operations.", + }) + m.indexHeaderLazyUnloadCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "thanos_bucket_store_indexheader_lazy_unload_total", + Help: "Total number of index-header lazy unload operations.", + }) + m.indexHeaderLazyUnloadFailedCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "thanos_bucket_store_indexheader_lazy_unload_failed_total", + Help: "Total number of failed index-header lazy unload operations.", + }) + m.indexHeaderLazyLoadDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_indexheader_lazy_load_duration_seconds", + Help: "Duration of the index-header lazy loading in seconds.", + Buckets: []float64{0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5}, + }) + return &m } diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index 7496b3f3dd0..2b5dbb1fa69 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -347,6 +347,8 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro u.cfg.BucketStore.IndexCache.PostingsCompression, u.cfg.BucketStore.PostingOffsetsInMemSampling, true, // Enable series hints. 
+ u.cfg.BucketStore.IndexHeaderLazyLoadingEnabled, + u.cfg.BucketStore.IndexHeaderLazyLoadingIdleTimeout, ) if err != nil { return nil, err diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 357e1f0d394..b36fc71dc48 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -2,11 +2,14 @@ package storegateway import ( "context" + "fmt" "io/ioutil" + "math" "os" "path/filepath" "strings" "testing" + "time" "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" @@ -43,7 +46,7 @@ func TestBucketStores_InitialSync(t *testing.T) { require.NoError(t, err) for userID, metricName := range userToMetric { - generateStorageBlock(t, storageDir, userID, metricName, 10, 100) + generateStorageBlock(t, storageDir, userID, metricName, 10, 100, 15) } bucket, err := filesystem.NewBucketClient(filesystem.Config{Directory: storageDir}) @@ -130,7 +133,7 @@ func TestBucketStores_SyncBlocks(t *testing.T) { require.NoError(t, err) // Run an initial sync to discover 1 block. - generateStorageBlock(t, storageDir, userID, metricName, 10, 100) + generateStorageBlock(t, storageDir, userID, metricName, 10, 100, 15) require.NoError(t, stores.InitialSync(ctx)) // Query a range for which we have no samples. @@ -140,7 +143,7 @@ func TestBucketStores_SyncBlocks(t *testing.T) { assert.Empty(t, seriesSet) // Generate another block and sync blocks again. - generateStorageBlock(t, storageDir, userID, metricName, 100, 200) + generateStorageBlock(t, storageDir, userID, metricName, 100, 200, 15) require.NoError(t, stores.SyncBlocks(ctx)) seriesSet, warnings, err = querySeries(stores, userID, metricName, 150, 180) @@ -227,6 +230,83 @@ func TestBucketStores_syncUsersBlocks(t *testing.T) { } } +func TestBucketStores_Series_ShouldCorrectlyQuerySeriesSpanningMultipleChunks(t *testing.T) { + for _, lazyLoadingEnabled := range []bool{true, false} { + t.Run(fmt.Sprintf("lazy loading enabled = %v", lazyLoadingEnabled), func(t *testing.T) { + testBucketStoresSeriesShouldCorrectlyQuerySeriesSpanningMultipleChunks(t, lazyLoadingEnabled) + }) + } +} + +func testBucketStoresSeriesShouldCorrectlyQuerySeriesSpanningMultipleChunks(t *testing.T, lazyLoadingEnabled bool) { + const ( + userID = "user-1" + metricName = "series_1" + ) + + ctx := context.Background() + cfg, cleanup := prepareStorageConfig(t) + cfg.BucketStore.IndexHeaderLazyLoadingEnabled = lazyLoadingEnabled + cfg.BucketStore.IndexHeaderLazyLoadingIdleTimeout = time.Minute + defer cleanup() + + storageDir, err := ioutil.TempDir(os.TempDir(), "storage-*") + require.NoError(t, err) + + // Generate a single block with 1 series and a lot of samples. 
+ generateStorageBlock(t, storageDir, userID, metricName, 0, 10000, 1) + + bucket, err := filesystem.NewBucketClient(filesystem.Config{Directory: storageDir}) + require.NoError(t, err) + + reg := prometheus.NewPedanticRegistry() + stores, err := NewBucketStores(cfg, NewNoShardingStrategy(), bucket, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), reg) + require.NoError(t, err) + require.NoError(t, stores.InitialSync(ctx)) + + tests := map[string]struct { + reqMinTime int64 + reqMaxTime int64 + expectedSamples int + }{ + "query the entire block": { + reqMinTime: math.MinInt64, + reqMaxTime: math.MaxInt64, + expectedSamples: 10000, + }, + "query the beginning of the block": { + reqMinTime: 0, + reqMaxTime: 100, + expectedSamples: store.MaxSamplesPerChunk, + }, + "query the middle of the block": { + reqMinTime: 4000, + reqMaxTime: 4050, + expectedSamples: store.MaxSamplesPerChunk, + }, + "query the end of the block": { + reqMinTime: 9800, + reqMaxTime: 10000, + expectedSamples: (store.MaxSamplesPerChunk * 2) + (10000 % store.MaxSamplesPerChunk), + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + // Query a range for which we have no samples. + seriesSet, warnings, err := querySeries(stores, userID, metricName, testData.reqMinTime, testData.reqMaxTime) + require.NoError(t, err) + assert.Empty(t, warnings) + assert.Len(t, seriesSet, 1) + + // Count returned samples. + samples, err := readSamplesFromChunks(seriesSet[0].Chunks) + require.NoError(t, err) + assert.Equal(t, testData.expectedSamples, len(samples)) + }) + } +} + func prepareStorageConfig(t *testing.T) (cortex_tsdb.BlocksStorageConfig, func()) { tmpDir, err := ioutil.TempDir(os.TempDir(), "blocks-sync-*") require.NoError(t, err) @@ -242,9 +322,7 @@ func prepareStorageConfig(t *testing.T) (cortex_tsdb.BlocksStorageConfig, func() return cfg, cleanup } -func generateStorageBlock(t *testing.T, storageDir, userID string, metricName string, minT, maxT int64) { - const step = 15 - +func generateStorageBlock(t *testing.T, storageDir, userID string, metricName string, minT, maxT int64, step int) { // Create a directory for the user (if doesn't already exist). userDir := filepath.Join(storageDir, userID) if _, err := os.Stat(userDir); err != nil { @@ -268,7 +346,7 @@ func generateStorageBlock(t *testing.T, storageDir, userID string, metricName st series := labels.Labels{labels.Label{Name: labels.MetricName, Value: metricName}} app := db.Appender(context.Background()) - for ts := minT; ts < maxT; ts += step { + for ts := minT; ts < maxT; ts += int64(step) { _, err = app.Add(series, ts, 1) require.NoError(t, err) } diff --git a/tools/blocksconvert/builder/tsdb.go b/tools/blocksconvert/builder/tsdb.go index ab15a197f49..02f5c89c969 100644 --- a/tools/blocksconvert/builder/tsdb.go +++ b/tools/blocksconvert/builder/tsdb.go @@ -207,6 +207,8 @@ func (d *tsdbBuilder) finishBlock(source string, labels map[string]string) (ulid }, }, + // We populate SegmentFiles (which is deprecated, but still used). The new Files property + // will be populated by Thanos block.Upload(). 
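The expectations in the new multi-chunk store-gateway test above follow from how the store splits a series into chunks of at most store.MaxSamplesPerChunk samples. A minimal, self-contained sketch of that arithmetic (illustrative only; the value 120 is an assumption mirroring the Thanos default, not taken from this diff):

package main

import "fmt"

func main() {
	// Assumption: mirrors store.MaxSamplesPerChunk in the Thanos bucket store.
	const maxSamplesPerChunk = 120
	const totalSamples = 10000 // the test writes one sample per timestamp 0..9999

	fullChunks := totalSamples / maxSamplesPerChunk // 83 chunks of 120 samples
	lastChunk := totalSamples % maxSamplesPerChunk  // final chunk holds the remaining 40

	// A query over [9800, 10000] overlaps the last three chunks, hence the
	// (MaxSamplesPerChunk*2) + (10000 % MaxSamplesPerChunk) expectation in the test.
	fmt.Println("full chunks:", fullChunks, "last chunk samples:", lastChunk)
	fmt.Println("expected samples for the end-of-block query:", 2*maxSamplesPerChunk+lastChunk)
}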
Thanos: metadata.Thanos{ Labels: labels, Source: metadata.SourceType(source), @@ -278,7 +280,7 @@ func (d *tsdbBuilder) finishBlock(source string, labels map[string]string) (ulid return ulid.ULID{}, errors.Wrap(err, "deleting unsorted chunks") } - if err := metadata.Write(d.log, d.tmpBlockDir, meta); err != nil { + if err := meta.WriteToDir(d.log, d.tmpBlockDir); err != nil { return ulid.ULID{}, errors.Wrap(err, "writing meta.json") } diff --git a/vendor/github.com/lann/builder/.travis.yml b/vendor/github.com/lann/builder/.travis.yml index 8687342e9d4..c8860f69bc7 100644 --- a/vendor/github.com/lann/builder/.travis.yml +++ b/vendor/github.com/lann/builder/.travis.yml @@ -1,6 +1,7 @@ language: go go: - - 1.1 - - 1.2 + - '1.8' + - '1.9' + - '1.10' - tip diff --git a/vendor/github.com/lann/builder/LICENSE b/vendor/github.com/lann/builder/LICENSE new file mode 100644 index 00000000000..a109e8051c1 --- /dev/null +++ b/vendor/github.com/lann/builder/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014-2015 Lann Martin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lann/builder/registry.go b/vendor/github.com/lann/builder/registry.go index 182e69f407f..612845418e2 100644 --- a/vendor/github.com/lann/builder/registry.go +++ b/vendor/github.com/lann/builder/registry.go @@ -1,8 +1,14 @@ package builder -import "reflect" +import ( + "reflect" + "sync" +) -var registry = make(map[reflect.Type]reflect.Type) +var ( + registry = make(map[reflect.Type]reflect.Type) + registryMux sync.RWMutex +) // RegisterType maps the given builderType to a structType. // This mapping affects the type of slices returned by Get and is required for @@ -13,6 +19,8 @@ var registry = make(map[reflect.Type]reflect.Type) // RegisterType will panic if builderType's underlying type is not Builder or // if structType's Kind is not Struct. func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Value { + registryMux.Lock() + defer registryMux.Unlock() structType.NumField() // Panic if structType is not a struct registry[builderType] = structType emptyValue := emptyBuilderValue.Convert(builderType) @@ -23,7 +31,7 @@ func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Va // // Returns an empty instance of the registered builder type which can be used // as the initial value for builder expressions. See example. 
-func Register(builderProto interface{}, structProto interface{}) interface{} { +func Register(builderProto, structProto interface{}) interface{} { empty := RegisterType( reflect.TypeOf(builderProto), reflect.TypeOf(structProto), @@ -32,6 +40,8 @@ func Register(builderProto interface{}, structProto interface{}) interface{} { } func getBuilderStructType(builderType reflect.Type) *reflect.Type { + registryMux.RLock() + defer registryMux.RUnlock() structType, ok := registry[builderType] if !ok { return nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 550ebc351f9..c8e63d609bd 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -14,6 +14,7 @@ import ( "os" "path" "path/filepath" + "sort" "strings" "time" @@ -66,6 +67,7 @@ func Download(ctx context.Context, logger log.Logger, bucket objstore.Bucket, id // It makes sure cleanup is done on error to avoid partial block uploads. // It also verifies basic features of Thanos block. // TODO(bplotka): Ensure bucket operations have reasonable backoff retries. +// NOTE: Upload updates `meta.Thanos.File` section. func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string) error { df, err := os.Stat(bdir) if err != nil { @@ -91,8 +93,18 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return errors.New("empty external labels are not allowed for Thanos block.") } - if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, MetaFilename), path.Join(DebugMetas, fmt.Sprintf("%s.json", id))); err != nil { - return errors.Wrap(err, "upload meta file to debug dir") + meta.Thanos.Files, err = gatherFileStats(bdir) + if err != nil { + return errors.Wrap(err, "gather meta file stats") + } + + metaEncoded := bytes.Buffer{} + if err := meta.Write(&metaEncoded); err != nil { + return errors.Wrap(err, "encode meta file") + } + + if err := bkt.Upload(ctx, path.Join(DebugMetas, fmt.Sprintf("%s.json", id)), bytes.NewReader(metaEncoded.Bytes())); err != nil { + return cleanUp(logger, bkt, id, errors.Wrap(err, "upload debug meta file")) } if err := objstore.UploadDir(ctx, logger, bkt, path.Join(bdir, ChunksDirname), path.Join(id.String(), ChunksDirname)); err != nil { @@ -103,9 +115,8 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return cleanUp(logger, bkt, id, errors.Wrap(err, "upload index")) } - // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file - // to be pending uploads. - if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, MetaFilename), path.Join(id.String(), MetaFilename)); err != nil { + // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file to be pending uploads. + if err := bkt.Upload(ctx, path.Join(id.String(), MetaFilename), &metaEncoded); err != nil { return cleanUp(logger, bkt, id, errors.Wrap(err, "upload meta file")) } @@ -122,7 +133,7 @@ func cleanUp(logger log.Logger, bkt objstore.Bucket, id ulid.ULID, err error) er } // MarkForDeletion creates a file which stores information about when the block was marked for deletion. 
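The lann/builder change above wraps the package-global registry map in a sync.RWMutex: registrations take the exclusive lock, lookups the shared one. A self-contained sketch of the same pattern (names below are illustrative, not the library's):

package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	registry    = map[reflect.Type]reflect.Type{}
	registryMux sync.RWMutex
)

// register write-locks the map, like RegisterType above.
func register(builderType, structType reflect.Type) {
	registryMux.Lock()
	defer registryMux.Unlock()
	registry[builderType] = structType
}

// lookup read-locks the map, like getBuilderStructType above.
func lookup(builderType reflect.Type) (reflect.Type, bool) {
	registryMux.RLock()
	defer registryMux.RUnlock()
	t, ok := registry[builderType]
	return t, ok
}

type fooBuilder struct{}
type foo struct{}

func main() {
	register(reflect.TypeOf(fooBuilder{}), reflect.TypeOf(foo{}))
	t, ok := lookup(reflect.TypeOf(fooBuilder{}))
	fmt.Println(t, ok)
}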
-func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, markedForDeletion prometheus.Counter) error { +func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, details string, markedForDeletion prometheus.Counter) error { deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename) deletionMarkExists, err := bkt.Exists(ctx, deletionMarkFile) if err != nil { @@ -137,6 +148,7 @@ func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket ID: id, DeletionTime: time.Now().Unix(), Version: metadata.DeletionMarkVersion1, + Details: details, }) if err != nil { return errors.Wrap(err, "json encode deletion mark") @@ -226,9 +238,7 @@ func IsBlockDir(path string) (id ulid.ULID, ok bool) { // GetSegmentFiles returns list of segment files for given block. Paths are relative to the chunks directory. // In case of errors, nil is returned. func GetSegmentFiles(blockDir string) []string { - chunksDir := filepath.Join(blockDir, ChunksDirname) - - files, err := ioutil.ReadDir(chunksDir) + files, err := ioutil.ReadDir(filepath.Join(blockDir, ChunksDirname)) if err != nil { return nil } @@ -240,3 +250,70 @@ func GetSegmentFiles(blockDir string) []string { } return result } + +// TODO(bwplotka): Gather stats when dirctly uploading files. +func gatherFileStats(blockDir string) (res []metadata.File, _ error) { + files, err := ioutil.ReadDir(filepath.Join(blockDir, ChunksDirname)) + if err != nil { + return nil, errors.Wrapf(err, "read dir %v", filepath.Join(blockDir, ChunksDirname)) + } + for _, f := range files { + res = append(res, metadata.File{ + RelPath: filepath.Join(ChunksDirname, f.Name()), + SizeBytes: f.Size(), + }) + } + + indexFile, err := os.Stat(filepath.Join(blockDir, IndexFilename)) + if err != nil { + return nil, errors.Wrapf(err, "stat %v", filepath.Join(blockDir, IndexFilename)) + } + res = append(res, metadata.File{ + RelPath: indexFile.Name(), + SizeBytes: indexFile.Size(), + }) + + metaFile, err := os.Stat(filepath.Join(blockDir, MetaFilename)) + if err != nil { + return nil, errors.Wrapf(err, "stat %v", filepath.Join(blockDir, MetaFilename)) + } + res = append(res, metadata.File{RelPath: metaFile.Name()}) + + sort.Slice(res, func(i, j int) bool { + return strings.Compare(res[i].RelPath, res[j].RelPath) < 0 + }) + // TODO(bwplotka): Add optional files like tombstones? + return res, err +} + +// MarkForNoCompact creates a file which marks block to be not compacted. 
+func MarkForNoCompact(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, reason metadata.NoCompactReason, details string, markedForNoCompact prometheus.Counter) error { + m := path.Join(id.String(), metadata.NoCompactMarkFilename) + noCompactMarkExists, err := bkt.Exists(ctx, m) + if err != nil { + return errors.Wrapf(err, "check exists %s in bucket", m) + } + if noCompactMarkExists { + level.Warn(logger).Log("msg", "requested to mark for no compaction, but file already exists; this should not happen; investigate", "err", errors.Errorf("file %s already exists in bucket", m)) + return nil + } + + noCompactMark, err := json.Marshal(metadata.NoCompactMark{ + ID: id, + Version: metadata.NoCompactMarkVersion1, + + NoCompactTime: time.Now().Unix(), + Reason: reason, + Details: details, + }) + if err != nil { + return errors.Wrap(err, "json encode no compact mark") + } + + if err := bkt.Upload(ctx, m, bytes.NewBuffer(noCompactMark)); err != nil { + return errors.Wrapf(err, "upload file %s to bucket", m) + } + markedForNoCompact.Inc() + level.Info(logger).Log("msg", "block has been marked for no compaction", "block", id) + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index e058e22172a..129b827f8e6 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -71,6 +71,9 @@ const ( // but don't have a replacement block yet. markedForDeletionMeta = "marked-for-deletion" + // MarkedForNoCompactionMeta is label for blocks which are loaded but also marked for no compaction. This label is also counted in `loaded` label metric. + MarkedForNoCompactionMeta = "marked-for-no-compact" + // Modified label values. 
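A hedged usage sketch of the new block.MarkForNoCompact helper shown above, using an in-memory bucket and a throwaway counter. The setup is illustrative only; objstore.NewInMemBucket and the sample ULID are assumptions for the example, not part of this diff:

package main

import (
	"context"

	"github.com/go-kit/kit/log"
	"github.com/oklog/ulid"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/thanos/pkg/block"
	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/objstore"
)

func main() {
	bkt := objstore.NewInMemBucket()
	id := ulid.MustParse("01DTW0ZCPDDNV4BV83Q2SV4QAZ")
	marked := prometheus.NewCounter(prometheus.CounterOpts{Name: "marked_for_no_compact_total"})

	// Uploads <block>/no-compact-mark.json and bumps the counter; compactors that honour
	// the marker will then exclude the block from horizontal and vertical compaction.
	err := block.MarkForNoCompact(context.Background(), log.NewNopLogger(), bkt, id,
		metadata.IndexSizeExceedingNoCompactReason, "index exceeding 64GB", marked)
	if err != nil {
		panic(err)
	}
}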
replicaRemovedMeta = "replica-label-removed" ) @@ -111,6 +114,7 @@ func newFetcherMetrics(reg prometheus.Registerer) *fetcherMetrics { []string{timeExcludedMeta}, []string{duplicateMeta}, []string{markedForDeletionMeta}, + []string{MarkedForNoCompactionMeta}, ) m.modified = extprom.NewTxGaugeVec( reg, @@ -258,7 +262,7 @@ func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Met return nil, errors.Wrapf(ErrorSyncMetaCorrupted, "meta.json %v unmarshal: %v", metaFile, err) } - if m.Version != metadata.MetaVersion1 { + if m.Version != metadata.TSDBVersion1 { return nil, errors.Errorf("unexpected meta file: %s version: %d", metaFile, m.Version) } @@ -268,7 +272,7 @@ func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Met level.Warn(f.logger).Log("msg", "best effort mkdir of the meta.json block dir failed; ignoring", "dir", cachedBlockDir, "err", err) } - if err := metadata.Write(f.logger, cachedBlockDir, m); err != nil { + if err := m.WriteToDir(f.logger, cachedBlockDir); err != nil { level.Warn(f.logger).Log("msg", "best effort save of the meta.json to local dir failed; ignoring", "dir", cachedBlockDir, "err", err) } } @@ -782,19 +786,20 @@ func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.UL f.deletionMarkMap = make(map[ulid.ULID]*metadata.DeletionMark) for id := range metas { - deletionMark, err := metadata.ReadDeletionMark(ctx, f.bkt, f.logger, id.String()) - if err == metadata.ErrorDeletionMarkNotFound { - continue - } - if errors.Cause(err) == metadata.ErrorUnmarshalDeletionMark { - level.Warn(f.logger).Log("msg", "found partial deletion-mark.json; if we will see it happening often for the same block, consider manually deleting deletion-mark.json from the object storage", "block", id, "err", err) - continue - } - if err != nil { + m := &metadata.DeletionMark{} + if err := metadata.ReadMarker(ctx, f.logger, f.bkt, id.String(), m); err != nil { + if errors.Cause(err) == metadata.ErrorMarkerNotFound { + continue + } + if errors.Cause(err) == metadata.ErrorUnmarshalMarker { + level.Warn(f.logger).Log("msg", "found partial deletion-mark.json; if we will see it happening often for the same block, consider manually deleting deletion-mark.json from the object storage", "block", id, "err", err) + continue + } return err } - f.deletionMarkMap[id] = deletionMark - if time.Since(time.Unix(deletionMark.DeletionTime, 0)).Seconds() > f.delay.Seconds() { + + f.deletionMarkMap[id] = m + if time.Since(time.Unix(m.DeletionTime, 0)).Seconds() > f.delay.Seconds() { synced.WithLabelValues(markedForDeletionMeta).Inc() delete(metas, id) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/index.go b/vendor/github.com/thanos-io/thanos/pkg/block/index.go index 41ac09dad7e..c51251b2dee 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/index.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/index.go @@ -303,12 +303,12 @@ func Repair(logger log.Logger, dir string, id ulid.ULID, source metadata.SourceT return resid, errors.Wrap(err, "rewrite block") } resmeta.Thanos.SegmentFiles = GetSegmentFiles(resdir) - if err := metadata.Write(logger, resdir, &resmeta); err != nil { + if err := resmeta.WriteToDir(logger, resdir); err != nil { return resid, err } // TSDB may rewrite metadata in bdir. // TODO: This is not needed in newer TSDB code. See https://github.com/prometheus/tsdb/pull/637. 
- if err := metadata.Write(logger, bdir, meta); err != nil { + if err := meta.WriteToDir(logger, bdir); err != nil { return resid, err } return resid, nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go index 42c33767f73..20ae1c5bc92 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go @@ -637,8 +637,8 @@ func newBinaryTOCFromByteSlice(bs index.ByteSlice) (*BinaryTOC, error) { }, nil } -func (r BinaryReader) IndexVersion() int { - return r.indexVersion +func (r BinaryReader) IndexVersion() (int, error) { + return r.indexVersion, nil } // TODO(bwplotka): Get advantage of multi value offset fetch. @@ -871,7 +871,7 @@ func yoloString(b []byte) string { return *((*string)(unsafe.Pointer(&b))) } -func (r BinaryReader) LabelNames() []string { +func (r BinaryReader) LabelNames() ([]string, error) { allPostingsKeyName, _ := index.AllPostingsKey() labelNames := make([]string, 0, len(r.postings)) for name := range r.postings { @@ -882,7 +882,7 @@ func (r BinaryReader) LabelNames() []string { labelNames = append(labelNames, name) } sort.Strings(labelNames) - return labelNames + return labelNames, nil } func (r *BinaryReader) Close() error { return r.c.Close() } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/header.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/header.go index dbbe335deb8..657427bd6f7 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/header.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/header.go @@ -18,7 +18,7 @@ type Reader interface { io.Closer // IndexVersion returns version of index. - IndexVersion() int + IndexVersion() (int, error) // PostingsOffset returns start and end offsets of postings for given name and value. // The end offset might be bigger than the actual posting ending, but not larger than the whole index file. @@ -36,5 +36,5 @@ type Reader interface { LabelValues(name string) ([]string, error) // LabelNames returns all label names. - LabelNames() []string + LabelNames() ([]string, error) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go new file mode 100644 index 00000000000..e9b9dc20bdc --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go @@ -0,0 +1,273 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package indexheader + +import ( + "context" + "os" + "path/filepath" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/tsdb/index" + "go.uber.org/atomic" + + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/objstore" +) + +// LazyBinaryReaderMetrics holds metrics tracked by LazyBinaryReader. +type LazyBinaryReaderMetrics struct { + loadCount prometheus.Counter + loadFailedCount prometheus.Counter + unloadCount prometheus.Counter + unloadFailedCount prometheus.Counter + loadDuration prometheus.Histogram +} + +// NewLazyBinaryReaderMetrics makes new LazyBinaryReaderMetrics. 
+func NewLazyBinaryReaderMetrics(reg prometheus.Registerer) *LazyBinaryReaderMetrics { + return &LazyBinaryReaderMetrics{ + loadCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "indexheader_lazy_load_total", + Help: "Total number of index-header lazy load operations.", + }), + loadFailedCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "indexheader_lazy_load_failed_total", + Help: "Total number of failed index-header lazy load operations.", + }), + unloadCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "indexheader_lazy_unload_total", + Help: "Total number of index-header lazy unload operations.", + }), + unloadFailedCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "indexheader_lazy_unload_failed_total", + Help: "Total number of failed index-header lazy unload operations.", + }), + loadDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "indexheader_lazy_load_duration_seconds", + Help: "Duration of the index-header lazy loading in seconds.", + Buckets: []float64{0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5}, + }), + } +} + +// LazyBinaryReader wraps BinaryReader and loads (mmap) the index-header only upon +// the first Reader function is called. +type LazyBinaryReader struct { + ctx context.Context + logger log.Logger + bkt objstore.BucketReader + dir string + filepath string + id ulid.ULID + postingOffsetsInMemSampling int + metrics *LazyBinaryReaderMetrics + onClosed func(*LazyBinaryReader) + + readerMx sync.RWMutex + reader *BinaryReader + readerErr error + + // Keep track of the last time it was used. + usedAt *atomic.Int64 +} + +// NewLazyBinaryReader makes a new LazyBinaryReader. If the index-header does not exist +// on the local disk at dir location, this function will build it downloading required +// sections from the full index stored in the bucket. However, this function doesn't load +// (mmap) the index-header; it will be loaded at first Reader function call. +func NewLazyBinaryReader( + ctx context.Context, + logger log.Logger, + bkt objstore.BucketReader, + dir string, + id ulid.ULID, + postingOffsetsInMemSampling int, + metrics *LazyBinaryReaderMetrics, + onClosed func(*LazyBinaryReader), +) (*LazyBinaryReader, error) { + filepath := filepath.Join(dir, id.String(), block.IndexHeaderFilename) + + // If the index-header doesn't exist we should download it. + if _, err := os.Stat(filepath); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrap(err, "read index header") + } + + level.Debug(logger).Log("msg", "the index-header doesn't exist on disk; recreating", "path", filepath) + + start := time.Now() + if err := WriteBinary(ctx, bkt, id, filepath); err != nil { + return nil, errors.Wrap(err, "write index header") + } + + level.Debug(logger).Log("msg", "built index-header file", "path", filepath, "elapsed", time.Since(start)) + } + + return &LazyBinaryReader{ + ctx: ctx, + logger: logger, + bkt: bkt, + dir: dir, + filepath: filepath, + id: id, + postingOffsetsInMemSampling: postingOffsetsInMemSampling, + metrics: metrics, + usedAt: atomic.NewInt64(time.Now().UnixNano()), + onClosed: onClosed, + }, nil +} + +// Close implements Reader. It unloads the index-header from memory (releasing the mmap +// area), but a subsequent call to any other Reader function will automatically reload it. +func (r *LazyBinaryReader) Close() error { + if r.onClosed != nil { + defer r.onClosed(r) + } + + return r.unload() +} + +// IndexVersion implements Reader. 
+func (r *LazyBinaryReader) IndexVersion() (int, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return 0, err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.IndexVersion() +} + +// PostingsOffset implements Reader. +func (r *LazyBinaryReader) PostingsOffset(name string, value string) (index.Range, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return index.Range{}, err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.PostingsOffset(name, value) +} + +// LookupSymbol implements Reader. +func (r *LazyBinaryReader) LookupSymbol(o uint32) (string, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return "", err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.LookupSymbol(o) +} + +// LabelValues implements Reader. +func (r *LazyBinaryReader) LabelValues(name string) ([]string, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return nil, err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.LabelValues(name) +} + +// LabelNames implements Reader. +func (r *LazyBinaryReader) LabelNames() ([]string, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return nil, err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.LabelNames() +} + +// load ensures the underlying binary index-header reader has been successfully loaded. Returns +// an error on failure. This function MUST be called with the read lock already acquired. +func (r *LazyBinaryReader) load() error { + // Nothing to do if we already tried loading it. + if r.reader != nil { + return nil + } + if r.readerErr != nil { + return r.readerErr + } + + // Take the write lock to ensure we'll try to load it only once. Take again + // the read lock once done. + r.readerMx.RUnlock() + r.readerMx.Lock() + defer r.readerMx.RLock() + defer r.readerMx.Unlock() + + // Ensure none else tried to load it in the meanwhile. + if r.reader != nil { + return nil + } + if r.readerErr != nil { + return r.readerErr + } + + level.Debug(r.logger).Log("msg", "lazy loading index-header file", "path", r.filepath) + r.metrics.loadCount.Inc() + startTime := time.Now() + + reader, err := NewBinaryReader(r.ctx, r.logger, r.bkt, r.dir, r.id, r.postingOffsetsInMemSampling) + if err != nil { + r.metrics.loadFailedCount.Inc() + r.readerErr = err + return errors.Wrapf(err, "lazy load index-header file at %s", r.filepath) + } + + r.reader = reader + level.Debug(r.logger).Log("msg", "lazy loaded index-header file", "path", r.filepath, "elapsed", time.Since(startTime)) + r.metrics.loadDuration.Observe(time.Since(startTime).Seconds()) + + return nil +} + +// unload closes underlying BinaryReader. Calling this function on a already unloaded reader is a no-op. +func (r *LazyBinaryReader) unload() error { + // Always update the used timestamp so that the pool will not call unload() again until the next + // idle timeout is hit. 
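load() above relies on a read-to-write lock upgrade: every Reader method holds the read lock, and on first use the read lock is released, the write lock taken, the "already loaded" check repeated, and the locks swapped back before returning. A stripped-down, self-contained sketch of that pattern with hypothetical names (not part of the vendored code):

package main

import (
	"fmt"
	"sync"
)

type lazyValue struct {
	mx    sync.RWMutex
	value *string
}

// get mirrors the Reader methods: it takes the read lock, then ensures the value is loaded.
func (l *lazyValue) get() (string, error) {
	l.mx.RLock()
	defer l.mx.RUnlock()

	if err := l.load(); err != nil {
		return "", err
	}
	return *l.value, nil
}

// load must be called with the read lock held, exactly like LazyBinaryReader.load().
func (l *lazyValue) load() error {
	if l.value != nil {
		return nil // Fast path: already loaded while holding only the read lock.
	}

	// Upgrade to the write lock so a single goroutine performs the load, then downgrade
	// again before returning (deferred calls run LIFO: Unlock first, then RLock).
	l.mx.RUnlock()
	l.mx.Lock()
	defer l.mx.RLock()
	defer l.mx.Unlock()

	if l.value != nil {
		return nil // Another goroutine loaded it while we waited for the write lock.
	}

	v := "loaded"
	l.value = &v
	return nil
}

func main() {
	l := &lazyValue{}
	v, err := l.get()
	fmt.Println(v, err)
}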
+ r.usedAt.Store(time.Now().UnixNano()) + + r.readerMx.Lock() + defer r.readerMx.Unlock() + + if r.reader == nil { + return nil + } + + r.metrics.unloadCount.Inc() + if err := r.reader.Close(); err != nil { + r.metrics.unloadFailedCount.Inc() + return err + } + + r.reader = nil + return nil +} + +func (r *LazyBinaryReader) lastUsedAt() int64 { + return r.usedAt.Load() +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go new file mode 100644 index 00000000000..660ae4853a3 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -0,0 +1,147 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package indexheader + +import ( + "context" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/prometheus/client_golang/prometheus" + + "github.com/thanos-io/thanos/pkg/objstore" +) + +// ReaderPool is used to istantiate new index-header readers and keep track of them. +// When the lazy reader is enabled, the pool keeps track of all instantiated readers +// and automatically close them once the idle timeout is reached. A closed lazy reader +// will be automatically re-opened upon next usage. +type ReaderPool struct { + lazyReaderEnabled bool + lazyReaderIdleTimeout time.Duration + lazyReaderMetrics *LazyBinaryReaderMetrics + logger log.Logger + + // Channel used to signal once the pool is closing. + close chan struct{} + + // Keep track of all readers managed by the pool. + lazyReadersMx sync.Mutex + lazyReaders map[*LazyBinaryReader]struct{} +} + +// NewReaderPool makes a new ReaderPool. +func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTimeout time.Duration, reg prometheus.Registerer) *ReaderPool { + p := &ReaderPool{ + logger: logger, + lazyReaderEnabled: lazyReaderEnabled, + lazyReaderIdleTimeout: lazyReaderIdleTimeout, + lazyReaderMetrics: NewLazyBinaryReaderMetrics(reg), + lazyReaders: make(map[*LazyBinaryReader]struct{}), + close: make(chan struct{}), + } + + // Start a goroutine to close idle readers (only if required). + if p.lazyReaderEnabled && p.lazyReaderIdleTimeout > 0 { + checkFreq := p.lazyReaderIdleTimeout / 10 + + go func() { + for { + select { + case <-p.close: + return + case <-time.After(checkFreq): + p.closeIdleReaders() + } + } + }() + } + + return p +} + +// NewBinaryReader creates and returns a new binary reader. If the pool has been configured +// with lazy reader enabled, this function will return a lazy reader. The returned lazy reader +// is tracked by the pool and automatically closed once the idle timeout expires. +func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int) (Reader, error) { + var reader Reader + var err error + + if p.lazyReaderEnabled { + reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.lazyReaderMetrics, p.onLazyReaderClosed) + } else { + reader, err = NewBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling) + } + + if err != nil { + return nil, err + } + + // Keep track of lazy readers only if required. 
+ if p.lazyReaderEnabled && p.lazyReaderIdleTimeout > 0 { + p.lazyReadersMx.Lock() + p.lazyReaders[reader.(*LazyBinaryReader)] = struct{}{} + p.lazyReadersMx.Unlock() + } + + return reader, err +} + +// Close the pool and stop checking for idle readers. No reader tracked by this pool +// will be closed. It's the caller responsibility to close readers. +func (p *ReaderPool) Close() { + close(p.close) +} + +func (p *ReaderPool) closeIdleReaders() { + for _, r := range p.getIdleReaders() { + // Closing an already closed reader is a no-op, so we close it and just update + // the last timestamp on success. If it will be still be idle the next time this + // function is called, we'll try to close it again and will just be a no-op. + // + // Due to concurrency, the current implementation may close a reader which was + // use between when the list of idle readers has been computed and now. This is + // an edge case we're willing to accept, to not further complicate the logic. + if err := r.unload(); err != nil { + level.Warn(p.logger).Log("msg", "failed to close idle index-header reader", "err", err) + } + } +} + +func (p *ReaderPool) getIdleReaders() []*LazyBinaryReader { + p.lazyReadersMx.Lock() + defer p.lazyReadersMx.Unlock() + + var idle []*LazyBinaryReader + threshold := time.Now().Add(-p.lazyReaderIdleTimeout).UnixNano() + + for r := range p.lazyReaders { + if r.lastUsedAt() < threshold { + idle = append(idle, r) + } + } + + return idle +} + +func (p *ReaderPool) isTracking(r *LazyBinaryReader) bool { + p.lazyReadersMx.Lock() + defer p.lazyReadersMx.Unlock() + + _, ok := p.lazyReaders[r] + return ok +} + +func (p *ReaderPool) onLazyReaderClosed(r *LazyBinaryReader) { + p.lazyReadersMx.Lock() + defer p.lazyReadersMx.Unlock() + + // When this function is called, it means the reader has been closed NOT because was idle + // but because the consumer closed it. By contract, a reader closed by the consumer can't + // be used anymore, so we can automatically remove it from the pool. + delete(p.lazyReaders, r) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/deletionmark.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/deletionmark.go deleted file mode 100644 index 5f2a9f04adc..00000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/deletionmark.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package metadata - -import ( - "context" - "encoding/json" - "io/ioutil" - "path" - - "github.com/go-kit/kit/log" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" -) - -const ( - // DeletionMarkFilename is the known json filename to store details about when block is marked for deletion. - DeletionMarkFilename = "deletion-mark.json" - - // DeletionMarkVersion1 is the version of deletion-mark file supported by Thanos. - DeletionMarkVersion1 = 1 -) - -// ErrorDeletionMarkNotFound is the error when deletion-mark.json file is not found. -var ErrorDeletionMarkNotFound = errors.New("deletion-mark.json not found") - -// ErrorUnmarshalDeletionMark is the error when unmarshalling deletion-mark.json file. -// This error can occur because deletion-mark.json has been partially uploaded to block storage -// or the deletion-mark.json file is not a valid json file. -var ErrorUnmarshalDeletionMark = errors.New("unmarshal deletion-mark.json") - -// DeletionMark stores block id and when block was marked for deletion. 
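The pool above closes idle lazy readers from a single background goroutine: it wakes up every lazyReaderIdleTimeout/10, collects the readers whose last-use timestamp is older than the idle threshold while holding the tracking lock, and unloads them outside of it. A minimal sketch of that loop, with hypothetical names standing in for the tracked readers:

package main

import (
	"fmt"
	"sync"
	"time"
)

type idleTracker struct {
	mu       sync.Mutex
	lastUsed map[string]time.Time
}

func (t *idleTracker) touch(name string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.lastUsed[name] = time.Now()
}

// sweep mirrors getIdleReaders()/closeIdleReaders(): collect idle entries under the lock,
// then "unload" them outside of it.
func (t *idleTracker) sweep(idleTimeout time.Duration) {
	threshold := time.Now().Add(-idleTimeout)

	t.mu.Lock()
	var idle []string
	for name, used := range t.lastUsed {
		if used.Before(threshold) {
			idle = append(idle, name)
		}
	}
	t.mu.Unlock()

	for _, name := range idle {
		fmt.Println("unloading idle reader:", name)
	}
}

func main() {
	t := &idleTracker{lastUsed: map[string]time.Time{}}
	t.touch("01DTW0ZCPDDNV4BV83Q2SV4QAZ")

	idleTimeout := 200 * time.Millisecond
	stop := make(chan struct{})

	go func() { // one goroutine per pool, checking every idleTimeout/10
		for {
			select {
			case <-stop:
				return
			case <-time.After(idleTimeout / 10):
				t.sweep(idleTimeout)
			}
		}
	}()

	time.Sleep(300 * time.Millisecond) // the reader becomes idle and is swept
	close(stop)
}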
-type DeletionMark struct { - // ID of the tsdb block. - ID ulid.ULID `json:"id"` - - // DeletionTime is a unix timestamp of when the block was marked to be deleted. - DeletionTime int64 `json:"deletion_time"` - - // Version of the file. - Version int `json:"version"` -} - -// ReadDeletionMark reads the given deletion mark file from /deletion-mark.json in bucket. -func ReadDeletionMark(ctx context.Context, bkt objstore.InstrumentedBucketReader, logger log.Logger, dir string) (*DeletionMark, error) { - deletionMarkFile := path.Join(dir, DeletionMarkFilename) - - r, err := bkt.ReaderWithExpectedErrs(bkt.IsObjNotFoundErr).Get(ctx, deletionMarkFile) - if err != nil { - if bkt.IsObjNotFoundErr(err) { - return nil, ErrorDeletionMarkNotFound - } - return nil, errors.Wrapf(err, "get file: %s", deletionMarkFile) - } - - defer runutil.CloseWithLogOnErr(logger, r, "close bkt deletion-mark reader") - - metaContent, err := ioutil.ReadAll(r) - if err != nil { - return nil, errors.Wrapf(err, "read file: %s", deletionMarkFile) - } - - deletionMark := DeletionMark{} - if err := json.Unmarshal(metaContent, &deletionMark); err != nil { - return nil, errors.Wrapf(ErrorUnmarshalDeletionMark, "file: %s; err: %v", deletionMarkFile, err.Error()) - } - - if deletionMark.Version != DeletionMarkVersion1 { - return nil, errors.Errorf("unexpected deletion-mark file version %d", deletionMark.Version) - } - - return &deletionMark, nil -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go new file mode 100644 index 00000000000..b3c8b9d1f05 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go @@ -0,0 +1,119 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package metadata + +import ( + "context" + "encoding/json" + "io/ioutil" + "path" + + "github.com/go-kit/kit/log" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/runutil" +) + +const ( + // DeletionMarkFilename is the known json filename for optional file storing details about when block is marked for deletion. + // If such file is present in block dir, it means the block is meant to be deleted after certain delay. + DeletionMarkFilename = "deletion-mark.json" + // NoCompactMarkFilename is the known json filename for optional file storing details about why block has to be excluded from compaction. + // If such file is present in block dir, it means the block has to excluded from compaction (both vertical and horizontal) or rewrite (e.g deletions). + NoCompactMarkFilename = "no-compact-mark.json" + + // DeletionMarkVersion1 is the version of deletion-mark file supported by Thanos. + DeletionMarkVersion1 = 1 + // NoCompactMarkVersion1 is the version of no-compact-mark file supported by Thanos. + NoCompactMarkVersion1 = 1 +) + +var ( + // ErrorMarkerNotFound is the error when marker file is not found. + ErrorMarkerNotFound = errors.New("marker not found") + // ErrorUnmarshalMarker is the error when unmarshalling marker JSON file. + // This error can occur because marker has been partially uploaded to block storage + // or the marker file is not a valid json file. + ErrorUnmarshalMarker = errors.New("unmarshal marker JSON") +) + +type Marker interface { + markerFilename() string +} + +// DeletionMark stores block id and when block was marked for deletion. +type DeletionMark struct { + // ID of the tsdb block. 
+ ID ulid.ULID `json:"id"` + // Version of the file. + Version int `json:"version"` + // Details is a human readable string giving details of reason. + Details string `json:"details,omitempty"` + + // DeletionTime is a unix timestamp of when the block was marked to be deleted. + DeletionTime int64 `json:"deletion_time"` +} + +func (m *DeletionMark) markerFilename() string { return DeletionMarkFilename } + +// NoCompactReason is a reason for a block to be excluded from compaction. +type NoCompactReason string + +const ( + // ManualNoCompactReason is a custom reason of excluding from compaction that should be added when no-compact mark is added for unknown/user specified reason. + ManualNoCompactReason NoCompactReason = "manual" + // IndexSizeExceedingNoCompactReason is a reason of index being too big (for example exceeding 64GB limit: https://github.com/thanos-io/thanos/issues/1424) + // This reason can be ignored when vertical block sharding will be implemented. + IndexSizeExceedingNoCompactReason = "index-size-exceeding" +) + +// NoCompactMark marker stores reason of block being excluded from compaction if needed. +type NoCompactMark struct { + // ID of the tsdb block. + ID ulid.ULID `json:"id"` + // Version of the file. + Version int `json:"version"` + // Details is a human readable string giving details of reason. + Details string `json:"details,omitempty"` + + // NoCompactTime is a unix timestamp of when the block was marked for no compact. + NoCompactTime int64 `json:"no_compact_time"` + Reason NoCompactReason `json:"reason"` +} + +func (n *NoCompactMark) markerFilename() string { return NoCompactMarkFilename } + +// ReadMarker reads the given mark file from /.json in bucket. +func ReadMarker(ctx context.Context, logger log.Logger, bkt objstore.InstrumentedBucketReader, dir string, marker Marker) error { + markerFile := path.Join(dir, marker.markerFilename()) + r, err := bkt.ReaderWithExpectedErrs(bkt.IsObjNotFoundErr).Get(ctx, markerFile) + if err != nil { + if bkt.IsObjNotFoundErr(err) { + return ErrorMarkerNotFound + } + return errors.Wrapf(err, "get file: %s", markerFile) + } + defer runutil.CloseWithLogOnErr(logger, r, "close bkt marker reader") + + metaContent, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "read file: %s", markerFile) + } + + if err := json.Unmarshal(metaContent, marker); err != nil { + return errors.Wrapf(ErrorUnmarshalMarker, "file: %s; err: %v", markerFile, err.Error()) + } + switch marker.markerFilename() { + case NoCompactMarkFilename: + if version := marker.(*NoCompactMark).Version; version != NoCompactMarkVersion1 { + return errors.Errorf("unexpected no-compact-mark file version %d, expected %d", version, NoCompactMarkVersion1) + } + case DeletionMarkFilename: + if version := marker.(*DeletionMark).Version; version != DeletionMarkVersion1 { + return errors.Errorf("unexpected deletion-mark file version %d, expected %d", version, DeletionMarkVersion1) + } + } + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go index c361b703e74..db9e3792aa4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go @@ -10,6 +10,7 @@ package metadata import ( "encoding/json" + "io" "io/ioutil" "os" "path/filepath" @@ -37,11 +38,10 @@ const ( const ( // MetaFilename is the known JSON filename for meta information. 
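With deletionmark.go replaced by the generic markers above, callers unmarshal into a concrete marker through metadata.ReadMarker and switch on the sentinel errors, as the updated IgnoreDeletionMarkFilter does. A hedged usage sketch; objstore.WithNoopInstr and the in-memory bucket are assumptions used only to satisfy the InstrumentedBucketReader parameter in a runnable example:

package main

import (
	"context"
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/pkg/errors"

	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/objstore"
)

func main() {
	ctx := context.Background()
	bkt := objstore.WithNoopInstr(objstore.NewInMemBucket())

	// The marker passed in decides which file is read: DeletionMark reads
	// <dir>/deletion-mark.json, NoCompactMark reads <dir>/no-compact-mark.json.
	mark := &metadata.DeletionMark{}
	err := metadata.ReadMarker(ctx, log.NewNopLogger(), bkt, "01DTW0ZCPDDNV4BV83Q2SV4QAZ", mark)

	switch errors.Cause(err) {
	case nil:
		fmt.Println("block marked for deletion at", mark.DeletionTime, "details:", mark.Details)
	case metadata.ErrorMarkerNotFound:
		fmt.Println("no deletion mark for this block")
	case metadata.ErrorUnmarshalMarker:
		fmt.Println("partial or corrupt marker, ignoring")
	default:
		fmt.Println("reading marker failed:", err)
	}
}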
MetaFilename = "meta.json" -) - -const ( - // MetaVersion is a enumeration of meta versions supported by Thanos. - MetaVersion1 = iota + 1 + // TSDBVersion1 is a enumeration of TSDB meta versions supported by Thanos. + TSDBVersion1 = 1 + // ThanosVersion1 is a enumeration of Thanos section of TSDB meta supported by Thanos. + ThanosVersion1 = 1 ) // Meta describes the a block's meta. It wraps the known TSDB meta structure and @@ -54,6 +54,9 @@ type Meta struct { // Thanos holds block meta information specific to Thanos. type Thanos struct { + // Version of Thanos meta file. If none specified, 1 is assumed (since first version did not have explicit version specified). + Version int `json:"version,omitempty"` + Labels map[string]string `json:"labels"` Downsample ThanosDownsample `json:"downsample"` @@ -61,7 +64,20 @@ type Thanos struct { Source SourceType `json:"source"` // List of segment files (in chunks directory), in sorted order. Optional. + // Deprecated. Use Files instead. SegmentFiles []string `json:"segment_files,omitempty"` + + // File is a sorted (by rel path) list of all files in block directory of this block known to TSDB. + // Sorted by relative path. + // Useful to avoid API call to get size of each file, as well as for debugging purposes. + // Optional, added in v0.17.0. + Files []File `json:"files,omitempty"` +} + +type File struct { + RelPath string `json:"rel_path"` + // SizeBytes is optional (e.g meta.json does not show size). + SizeBytes int64 `json:"size_bytes,omitempty"` } type ThanosDownsample struct { @@ -82,15 +98,15 @@ func InjectThanos(logger log.Logger, bdir string, meta Thanos, downsampledMeta * newMeta.Compaction = downsampledMeta.Compaction } - if err := Write(logger, bdir, newMeta); err != nil { + if err := newMeta.WriteToDir(logger, bdir); err != nil { return nil, errors.Wrap(err, "write new meta") } return newMeta, nil } -// Write writes the given meta into /meta.json. -func Write(logger log.Logger, dir string, meta *Meta) error { +// WriteToDir writes the encoded meta into /meta.json. +func (m Meta) WriteToDir(logger log.Logger, dir string) error { // Make any changes to the file appear atomic. path := filepath.Join(dir, MetaFilename) tmp := path + ".tmp" @@ -100,10 +116,7 @@ func Write(logger log.Logger, dir string, meta *Meta) error { return err } - enc := json.NewEncoder(f) - enc.SetIndent("", "\t") - - if err := enc.Encode(meta); err != nil { + if err := m.Write(f); err != nil { runutil.CloseWithLogOnErr(logger, f, "close meta") return err } @@ -113,6 +126,13 @@ func Write(logger log.Logger, dir string, meta *Meta) error { return renameFile(logger, tmp, path) } +// Write writes the given encoded meta to writer. +func (m Meta) Write(w io.Writer) error { + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + return enc.Encode(&m) +} + func renameFile(logger log.Logger, from, to string) error { if err := os.RemoveAll(to); err != nil { return err @@ -145,8 +165,18 @@ func Read(dir string) (*Meta, error) { if err := json.Unmarshal(b, &m); err != nil { return nil, err } - if m.Version != MetaVersion1 { + if m.Version != TSDBVersion1 { return nil, errors.Errorf("unexpected meta file version %d", m.Version) } + + version := m.Thanos.Version + if version == 0 { + // For compatibility. 
+ version = ThanosVersion1 + } + + if version != ThanosVersion1 { + return nil, errors.Errorf("unexpected meta file Thanos section version %d", m.Version) + } return &m, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 059e8eab9d6..5fdec0b3b7e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -182,7 +183,7 @@ func (s *Syncer) GarbageCollect(ctx context.Context) error { delCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) level.Info(s.logger).Log("msg", "marking outdated block for deletion", "block", id) - err := block.MarkForDeletion(delCtx, s.logger, s.bkt, id, s.metrics.blocksMarkedForDeletion) + err := block.MarkForDeletion(delCtx, s.logger, s.bkt, id, "outdated block", s.metrics.blocksMarkedForDeletion) cancel() if err != nil { s.metrics.garbageCollectionFailures.Inc() @@ -323,7 +324,7 @@ type Group struct { labels labels.Labels resolution int64 mtx sync.Mutex - blocks map[ulid.ULID]*metadata.Meta + metasByMinTime []*metadata.Meta acceptMalformedIndex bool enableVerticalCompaction bool compactions prometheus.Counter @@ -361,7 +362,6 @@ func NewGroup( key: key, labels: lset, resolution: resolution, - blocks: map[ulid.ULID]*metadata.Meta{}, acceptMalformedIndex: acceptMalformedIndex, enableVerticalCompaction: enableVerticalCompaction, compactions: compactions, @@ -391,7 +391,11 @@ func (cg *Group) Add(meta *metadata.Meta) error { if cg.resolution != meta.Thanos.Downsample.Resolution { return errors.New("block and group resolution do not match") } - cg.blocks[meta.ULID] = meta + + cg.metasByMinTime = append(cg.metasByMinTime, meta) + sort.Slice(cg.metasByMinTime, func(i, j int) bool { + return cg.metasByMinTime[i].MinTime < cg.metasByMinTime[j].MinTime + }) return nil } @@ -400,8 +404,8 @@ func (cg *Group) IDs() (ids []ulid.ULID) { cg.mtx.Lock() defer cg.mtx.Unlock() - for id := range cg.blocks { - ids = append(ids, id) + for _, m := range cg.metasByMinTime { + ids = append(ids, m.ULID) } sort.Slice(ids, func(i, j int) bool { return ids[i].Compare(ids[j]) < 0 @@ -414,13 +418,10 @@ func (cg *Group) MinTime() int64 { cg.mtx.Lock() defer cg.mtx.Unlock() - min := int64(math.MaxInt64) - for _, b := range cg.blocks { - if b.MinTime < min { - min = b.MinTime - } + if len(cg.metasByMinTime) > 0 { + return cg.metasByMinTime[0].MinTime } - return min + return math.MaxInt64 } // MaxTime returns the max time across all group's blocks. @@ -429,9 +430,9 @@ func (cg *Group) MaxTime() int64 { defer cg.mtx.Unlock() max := int64(math.MinInt64) - for _, b := range cg.blocks { - if b.MaxTime > max { - max = b.MaxTime + for _, m := range cg.metasByMinTime { + if m.MaxTime > max { + max = m.MaxTime } } return max @@ -447,9 +448,35 @@ func (cg *Group) Resolution() int64 { return cg.resolution } +// Planner returns blocks to compact. +type Planner interface { + // Plan returns a block directories of blocks that should be compacted into single one. + // The blocks can be overlapping. The provided metadata has to be ordered by minTime. 
+ Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) +} + +// Compactor provides compaction against an underlying storage of time series data. +// This is similar to tsdb.Compactor just without Plan method. +// TODO(bwplotka): Split the Planner from Compactor on upstream as well, so we can import it. +type Compactor interface { + // Write persists a Block into a directory. + // No Block is written when resulting Block has 0 samples, and returns empty ulid.ULID{}. + Write(dest string, b tsdb.BlockReader, mint, maxt int64, parent *tsdb.BlockMeta) (ulid.ULID, error) + + // Compact runs compaction against the provided directories. Must + // only be called concurrently with results of Plan(). + // Can optionally pass a list of already open blocks, + // to avoid having to reopen them. + // When resulting Block has 0 samples + // * No block is written. + // * The source dirs are marked Deletable. + // * Returns empty ulid.ULID{}. + Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) +} + // Compact plans and runs a single compaction against the group. The compacted result // is uploaded into the bucket the blocks were retrieved from. -func (cg *Group) Compact(ctx context.Context, dir string, comp tsdb.Compactor) (shouldRerun bool, compID ulid.ULID, rerr error) { +func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp Compactor) (shouldRerun bool, compID ulid.ULID, rerr error) { cg.compactionRunsStarted.Inc() subDir := filepath.Join(dir, cg.Key()) @@ -470,7 +497,7 @@ func (cg *Group) Compact(ctx context.Context, dir string, comp tsdb.Compactor) ( return false, ulid.ULID{}, errors.Wrap(err, "create compaction group dir") } - shouldRerun, compID, err := cg.compact(ctx, subDir, comp) + shouldRerun, compID, err := cg.compact(ctx, subDir, planner, comp) if err != nil { cg.compactionFailures.Inc() return false, ulid.ULID{}, err @@ -562,22 +589,18 @@ func IsRetryError(err error) bool { return ok } -func (cg *Group) areBlocksOverlapping(include *metadata.Meta, excludeDirs ...string) error { +func (cg *Group) areBlocksOverlapping(include *metadata.Meta, exclude ...*metadata.Meta) error { var ( - metas []tsdb.BlockMeta - exclude = map[ulid.ULID]struct{}{} + metas []tsdb.BlockMeta + excludeMap = map[ulid.ULID]struct{}{} ) - for _, e := range excludeDirs { - id, err := ulid.Parse(filepath.Base(e)) - if err != nil { - return errors.Wrapf(err, "overlaps find dir %s", e) - } - exclude[id] = struct{}{} + for _, meta := range exclude { + excludeMap[meta.ULID] = struct{}{} } - for _, m := range cg.blocks { - if _, ok := exclude[m.ULID]; ok { + for _, m := range cg.metasByMinTime { + if _, ok := excludeMap[m.ULID]; ok { continue } metas = append(metas, m.BlockMeta) @@ -648,13 +671,13 @@ func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, defer cancel() // TODO(bplotka): Issue with this will introduce overlap that will halt compactor. Automate that (fix duplicate overlaps caused by this). 
- if err := block.MarkForDeletion(delCtx, logger, bkt, ie.id, blocksMarkedForDeletion); err != nil { + if err := block.MarkForDeletion(delCtx, logger, bkt, ie.id, "source of repaired block", blocksMarkedForDeletion); err != nil { return errors.Wrapf(err, "marking old block %s for deletion has failed", ie.id) } return nil } -func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) (shouldRerun bool, compID ulid.ULID, err error) { +func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Compactor) (shouldRerun bool, compID ulid.ULID, err error) { cg.mtx.Lock() defer cg.mtx.Unlock() @@ -670,29 +693,16 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( overlappingBlocks = true } - // Planning a compaction works purely based on the meta.json files in our future group's dir. - // So we first dump all our memory block metas into the directory. - for _, meta := range cg.blocks { - bdir := filepath.Join(dir, meta.ULID.String()) - if err := os.MkdirAll(bdir, 0777); err != nil { - return false, ulid.ULID{}, errors.Wrap(err, "create planning block dir") - } - if err := metadata.Write(cg.logger, bdir, meta); err != nil { - return false, ulid.ULID{}, errors.Wrap(err, "write planning meta file") - } - } - - // Plan against the written meta.json files. - plan, err := comp.Plan(dir) + toCompact, err := planner.Plan(ctx, cg.metasByMinTime) if err != nil { return false, ulid.ULID{}, errors.Wrap(err, "plan compaction") } - if len(plan) == 0 { + if len(toCompact) == 0 { // Nothing to do. return false, ulid.ULID{}, nil } - level.Info(cg.logger).Log("msg", "compaction available and planned; downloading blocks", "plan", fmt.Sprintf("%v", plan)) + level.Info(cg.logger).Log("msg", "compaction available and planned; downloading blocks", "plan", fmt.Sprintf("%v", toCompact)) // Due to #183 we verify that none of the blocks in the plan have overlapping sources. // This is one potential source of how we could end up with duplicated chunks. @@ -701,71 +711,54 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( // Once we have a plan we need to download the actual data. begin := time.Now() - for _, pdir := range plan { - meta, err := metadata.Read(pdir) - if err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "read meta from %s", pdir) - } - + toCompactDirs := make([]string, 0, len(toCompact)) + for _, meta := range toCompact { + bdir := filepath.Join(dir, meta.ULID.String()) for _, s := range meta.Compaction.Sources { if _, ok := uniqueSources[s]; ok { - return false, ulid.ULID{}, halt(errors.Errorf("overlapping sources detected for plan %v", plan)) + return false, ulid.ULID{}, halt(errors.Errorf("overlapping sources detected for plan %v", toCompact)) } uniqueSources[s] = struct{}{} } - id, err := ulid.Parse(filepath.Base(pdir)) - if err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "plan dir %s", pdir) - } - - if meta.ULID.Compare(id) != 0 { - return false, ulid.ULID{}, errors.Errorf("mismatch between meta %s and dir %s", meta.ULID, id) - } - - if err := block.Download(ctx, cg.logger, cg.bkt, id, pdir); err != nil { - return false, ulid.ULID{}, retry(errors.Wrapf(err, "download block %s", id)) + if err := block.Download(ctx, cg.logger, cg.bkt, meta.ULID, bdir); err != nil { + return false, ulid.ULID{}, retry(errors.Wrapf(err, "download block %s", meta.ULID)) } // Ensure all input blocks are valid. 
- stats, err := block.GatherIndexIssueStats(cg.logger, filepath.Join(pdir, block.IndexFilename), meta.MinTime, meta.MaxTime) + stats, err := block.GatherIndexIssueStats(cg.logger, filepath.Join(bdir, block.IndexFilename), meta.MinTime, meta.MaxTime) if err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "gather index issues for block %s", pdir) + return false, ulid.ULID{}, errors.Wrapf(err, "gather index issues for block %s", bdir) } if err := stats.CriticalErr(); err != nil { - return false, ulid.ULID{}, halt(errors.Wrapf(err, "block with not healthy index found %s; Compaction level %v; Labels: %v", pdir, meta.Compaction.Level, meta.Thanos.Labels)) + return false, ulid.ULID{}, halt(errors.Wrapf(err, "block with not healthy index found %s; Compaction level %v; Labels: %v", bdir, meta.Compaction.Level, meta.Thanos.Labels)) } if err := stats.Issue347OutsideChunksErr(); err != nil { - return false, ulid.ULID{}, issue347Error(errors.Wrapf(err, "invalid, but reparable block %s", pdir), meta.ULID) + return false, ulid.ULID{}, issue347Error(errors.Wrapf(err, "invalid, but reparable block %s", bdir), meta.ULID) } if err := stats.PrometheusIssue5372Err(); !cg.acceptMalformedIndex && err != nil { return false, ulid.ULID{}, errors.Wrapf(err, - "block id %s, try running with --debug.accept-malformed-index", id) + "block id %s, try running with --debug.accept-malformed-index", meta.ULID) } + toCompactDirs = append(toCompactDirs, bdir) } - level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", plan), "duration", time.Since(begin)) + level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin)) begin = time.Now() - - compID, err = comp.Compact(dir, plan, nil) + compID, err = comp.Compact(dir, toCompactDirs, nil) if err != nil { - return false, ulid.ULID{}, halt(errors.Wrapf(err, "compact blocks %v", plan)) + return false, ulid.ULID{}, halt(errors.Wrapf(err, "compact blocks %v", toCompactDirs)) } if compID == (ulid.ULID{}) { // Prometheus compactor found that the compacted block would have no samples. 
- level.Info(cg.logger).Log("msg", "compacted block would have no samples, deleting source blocks", "blocks", fmt.Sprintf("%v", plan)) - for _, block := range plan { - meta, err := metadata.Read(block) - if err != nil { - level.Warn(cg.logger).Log("msg", "failed to read meta for block", "block", block) - continue - } + level.Info(cg.logger).Log("msg", "compacted block would have no samples, deleting source blocks", "blocks", fmt.Sprintf("%v", toCompactDirs)) + for _, meta := range toCompact { if meta.Stats.NumSamples == 0 { - if err := cg.deleteBlock(block); err != nil { - level.Warn(cg.logger).Log("msg", "failed to mark for deletion an empty block found during compaction", "block", block) + if err := cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String())); err != nil { + level.Warn(cg.logger).Log("msg", "failed to mark for deletion an empty block found during compaction", "block", meta.ULID) } } } @@ -777,7 +770,7 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( cg.verticalCompactions.Inc() } level.Info(cg.logger).Log("msg", "compacted blocks", "new", compID, - "blocks", fmt.Sprintf("%v", plan), "duration", time.Since(begin), "overlapping_blocks", overlappingBlocks) + "blocks", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "overlapping_blocks", overlappingBlocks) bdir := filepath.Join(dir, compID.String()) index := filepath.Join(bdir, block.IndexFilename) @@ -804,7 +797,7 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( // Ensure the output block is not overlapping with anything else, // unless vertical compaction is enabled. if !cg.enableVerticalCompaction { - if err := cg.areBlocksOverlapping(newMeta, plan...); err != nil { + if err := cg.areBlocksOverlapping(newMeta, toCompact...); err != nil { return false, ulid.ULID{}, halt(errors.Wrapf(err, "resulted compacted block %s overlaps with something", bdir)) } } @@ -819,23 +812,17 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( // Mark for deletion the blocks we just compacted from the group and bucket so they do not get included // into the next planning cycle. // Eventually the block we just uploaded should get synced into the group again (including sync-delay). 
- for _, b := range plan { - if err := cg.deleteBlock(b); err != nil { + for _, meta := range toCompact { + if err := cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String())); err != nil { return false, ulid.ULID{}, retry(errors.Wrapf(err, "mark old block for deletion from bucket")) } cg.groupGarbageCollectedBlocks.Inc() } - return true, compID, nil } -func (cg *Group) deleteBlock(b string) error { - id, err := ulid.Parse(filepath.Base(b)) - if err != nil { - return errors.Wrapf(err, "plan dir %s", b) - } - - if err := os.RemoveAll(b); err != nil { +func (cg *Group) deleteBlock(id ulid.ULID, bdir string) error { + if err := os.RemoveAll(bdir); err != nil { return errors.Wrapf(err, "remove old block dir %s", id) } @@ -843,7 +830,7 @@ func (cg *Group) deleteBlock(b string) error { delCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() level.Info(cg.logger).Log("msg", "marking compacted block for deletion", "old_block", id) - if err := block.MarkForDeletion(delCtx, cg.logger, cg.bkt, id, cg.blocksMarkedForDeletion); err != nil { + if err := block.MarkForDeletion(delCtx, cg.logger, cg.bkt, id, "source of compacted block", cg.blocksMarkedForDeletion); err != nil { return errors.Wrapf(err, "mark block %s for deletion from bucket", id) } return nil @@ -854,7 +841,8 @@ type BucketCompactor struct { logger log.Logger sy *Syncer grouper Grouper - comp tsdb.Compactor + comp Compactor + planner Planner compactDir string bkt objstore.Bucket concurrency int @@ -865,7 +853,8 @@ func NewBucketCompactor( logger log.Logger, sy *Syncer, grouper Grouper, - comp tsdb.Compactor, + planner Planner, + comp Compactor, compactDir string, bkt objstore.Bucket, concurrency int, @@ -877,6 +866,7 @@ func NewBucketCompactor( logger: logger, sy: sy, grouper: grouper, + planner: planner, comp: comp, compactDir: compactDir, bkt: bkt, @@ -914,7 +904,7 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { go func() { defer wg.Done() for g := range groupChan { - shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.comp) + shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.planner, c.comp) if err == nil { if shouldRerunGroup { mtx.Lock() @@ -995,3 +985,50 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { level.Info(c.logger).Log("msg", "compaction iterations done") return nil } + +var _ block.MetadataFilter = &GatherNoCompactionMarkFilter{} + +// GatherNoCompactionMarkFilter is a block.Fetcher filter that passes all metas. While doing it, it gathers all no-compact-mark.json markers. +// Not go routine safe. +// TODO(bwplotka): Add unit test. +type GatherNoCompactionMarkFilter struct { + logger log.Logger + bkt objstore.InstrumentedBucketReader + noCompactMarkedMap map[ulid.ULID]*metadata.NoCompactMark +} + +// NewGatherNoCompactionMarkFilter creates GatherNoCompactionMarkFilter. +func NewGatherNoCompactionMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader) *GatherNoCompactionMarkFilter { + return &GatherNoCompactionMarkFilter{ + logger: logger, + bkt: bkt, + } +} + +// NoCompactMarkedBlocks returns block ids that were marked for no compaction. +func (f *GatherNoCompactionMarkFilter) NoCompactMarkedBlocks() map[ulid.ULID]*metadata.NoCompactMark { + return f.noCompactMarkedMap +} + +// Filter passes all metas, while gathering no compact markers. 
+func (f *GatherNoCompactionMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced *extprom.TxGaugeVec) error { + f.noCompactMarkedMap = make(map[ulid.ULID]*metadata.NoCompactMark) + + for id := range metas { + m := &metadata.NoCompactMark{} + // TODO(bwplotka): Hook up bucket cache here + reset API so we don't introduce API calls . + if err := metadata.ReadMarker(ctx, f.logger, f.bkt, id.String(), m); err != nil { + if errors.Cause(err) == metadata.ErrorMarkerNotFound { + continue + } + if errors.Cause(err) == metadata.ErrorUnmarshalMarker { + level.Warn(f.logger).Log("msg", "found partial no-compact-mark.json; if we will see it happening often for the same block, consider manually deleting no-compact-mark.json from the object storage", "block", id, "err", err) + continue + } + return err + } + synced.WithLabelValues(block.MarkedForNoCompactionMeta).Inc() + f.noCompactMarkedMap[id] = m + } + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go index fbe49eae4df..1872091bfda 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go @@ -201,12 +201,12 @@ func (w *streamedBlockWriter) syncDir() (err error) { // writeMetaFile writes meta file. func (w *streamedBlockWriter) writeMetaFile() error { - w.meta.Version = metadata.MetaVersion1 + w.meta.Version = metadata.TSDBVersion1 w.meta.Thanos.Source = metadata.CompactorSource w.meta.Thanos.SegmentFiles = block.GetSegmentFiles(w.blockDir) w.meta.Stats.NumChunks = w.totalChunks w.meta.Stats.NumSamples = w.totalSamples w.meta.Stats.NumSeries = w.seriesRefs - return metadata.Write(w.logger, w.blockDir, &w.meta) + return w.meta.WriteToDir(w.logger, w.blockDir) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go new file mode 100644 index 00000000000..208d4832a49 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go @@ -0,0 +1,303 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package compact + +import ( + "context" + "fmt" + "math" + "path/filepath" + + "github.com/go-kit/kit/log" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/objstore" +) + +type tsdbBasedPlanner struct { + logger log.Logger + + ranges []int64 + + noCompBlocksFunc func() map[ulid.ULID]*metadata.NoCompactMark +} + +var _ Planner = &tsdbBasedPlanner{} + +// NewTSDBBasedPlanner is planner with the same functionality as Prometheus' TSDB. +// TODO(bwplotka): Consider upstreaming this to Prometheus. +// It's the same functionality just without accessing filesystem. +func NewTSDBBasedPlanner(logger log.Logger, ranges []int64) *tsdbBasedPlanner { + return &tsdbBasedPlanner{ + logger: logger, + ranges: ranges, + noCompBlocksFunc: func() map[ulid.ULID]*metadata.NoCompactMark { + return make(map[ulid.ULID]*metadata.NoCompactMark) + }, + } +} + +// NewPlanner is a default Thanos planner with the same functionality as Prometheus' TSDB plus special handling of excluded blocks. 
+// It's the same functionality just without accessing filesystem, and special handling of excluded blocks. +func NewPlanner(logger log.Logger, ranges []int64, noCompBlocks *GatherNoCompactionMarkFilter) *tsdbBasedPlanner { + return &tsdbBasedPlanner{logger: logger, ranges: ranges, noCompBlocksFunc: noCompBlocks.NoCompactMarkedBlocks} +} + +// TODO(bwplotka): Consider smarter algorithm, this prefers smaller iterative compactions vs big single one: https://github.com/thanos-io/thanos/issues/3405 +func (p *tsdbBasedPlanner) Plan(_ context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + return p.plan(p.noCompBlocksFunc(), metasByMinTime) +} + +func (p *tsdbBasedPlanner) plan(noCompactMarked map[ulid.ULID]*metadata.NoCompactMark, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + notExcludedMetasByMinTime := make([]*metadata.Meta, 0, len(metasByMinTime)) + for _, meta := range metasByMinTime { + if _, excluded := noCompactMarked[meta.ULID]; excluded { + continue + } + notExcludedMetasByMinTime = append(notExcludedMetasByMinTime, meta) + } + + res := selectOverlappingMetas(notExcludedMetasByMinTime) + if len(res) > 0 { + return res, nil + } + // No overlapping blocks, do compaction the usual way. + + // We do not include a recently producted block with max(minTime), so the block which was just uploaded to bucket. + // This gives users a window of a full block size maintenance if needed. + if _, excluded := noCompactMarked[metasByMinTime[len(metasByMinTime)-1].ULID]; !excluded { + notExcludedMetasByMinTime = notExcludedMetasByMinTime[:len(notExcludedMetasByMinTime)-1] + } + metasByMinTime = metasByMinTime[:len(metasByMinTime)-1] + res = append(res, selectMetas(p.ranges, noCompactMarked, metasByMinTime)...) + if len(res) > 0 { + return res, nil + } + + // Compact any blocks with big enough time range that have >5% tombstones. + for i := len(notExcludedMetasByMinTime) - 1; i >= 0; i-- { + meta := notExcludedMetasByMinTime[i] + if meta.MaxTime-meta.MinTime < p.ranges[len(p.ranges)/2] { + break + } + if float64(meta.Stats.NumTombstones)/float64(meta.Stats.NumSeries+1) > 0.05 { + return []*metadata.Meta{notExcludedMetasByMinTime[i]}, nil + } + } + + return nil, nil +} + +// selectMetas returns the dir metas that should be compacted into a single new block. +// If only a single block range is configured, the result is always nil. +// Copied and adjusted from https://github.com/prometheus/prometheus/blob/3d8826a3d42566684283a9b7f7e812e412c24407/tsdb/compact.go#L229. +func selectMetas(ranges []int64, noCompactMarked map[ulid.ULID]*metadata.NoCompactMark, metasByMinTime []*metadata.Meta) []*metadata.Meta { + if len(ranges) < 2 || len(metasByMinTime) < 1 { + return nil + } + highTime := metasByMinTime[len(metasByMinTime)-1].MinTime + + for _, iv := range ranges[1:] { + parts := splitByRange(metasByMinTime, iv) + if len(parts) == 0 { + continue + } + Outer: + for _, p := range parts { + // Do not select the range if it has a block whose compaction failed. + for _, m := range p { + if m.Compaction.Failed { + continue Outer + } + } + + if len(p) < 2 { + continue + } + + mint := p[0].MinTime + maxt := p[len(p)-1].MaxTime + + // Pick the range of blocks if it spans the full range (potentially with gaps) or is before the most recent block. + // This ensures we don't compact blocks prematurely when another one of the same size still would fits in the range + // after upload. 
+ if maxt-mint != iv && maxt > highTime { + continue + } + + // Check if any of resulted blocks are excluded. Exclude them in a way that does not introduce gaps to the system + // as well as preserve the ranges that would be used if they were not excluded. + // This is meant as short-term workaround to create ability for marking some blocks to not be touched for compaction. + lastExcluded := 0 + for i, id := range p { + if _, excluded := noCompactMarked[id.ULID]; !excluded { + continue + } + if len(p[lastExcluded:i]) > 1 { + return p[lastExcluded:i] + } + lastExcluded = i + 1 + } + if len(p[lastExcluded:]) > 1 { + return p[lastExcluded:] + } + } + } + + return nil +} + +// selectOverlappingMetas returns all dirs with overlapping time ranges. +// It expects sorted input by mint and returns the overlapping dirs in the same order as received. +// Copied and adjusted from https://github.com/prometheus/prometheus/blob/3d8826a3d42566684283a9b7f7e812e412c24407/tsdb/compact.go#L268. +func selectOverlappingMetas(metasByMinTime []*metadata.Meta) []*metadata.Meta { + if len(metasByMinTime) < 2 { + return nil + } + var overlappingMetas []*metadata.Meta + globalMaxt := metasByMinTime[0].MaxTime + for i, m := range metasByMinTime[1:] { + if m.MinTime < globalMaxt { + if len(overlappingMetas) == 0 { + // When it is the first overlap, need to add the last one as well. + overlappingMetas = append(overlappingMetas, metasByMinTime[i]) + } + overlappingMetas = append(overlappingMetas, m) + } else if len(overlappingMetas) > 0 { + break + } + + if m.MaxTime > globalMaxt { + globalMaxt = m.MaxTime + } + } + return overlappingMetas +} + +// splitByRange splits the directories by the time range. The range sequence starts at 0. +// +// For example, if we have blocks [0-10, 10-20, 50-60, 90-100] and the split range tr is 30 +// it returns [0-10, 10-20], [50-60], [90-100]. +// Copied and adjusted from: https://github.com/prometheus/prometheus/blob/3d8826a3d42566684283a9b7f7e812e412c24407/tsdb/compact.go#L294. +func splitByRange(metasByMinTime []*metadata.Meta, tr int64) [][]*metadata.Meta { + var splitDirs [][]*metadata.Meta + + for i := 0; i < len(metasByMinTime); { + var ( + group []*metadata.Meta + t0 int64 + m = metasByMinTime[i] + ) + // Compute start of aligned time range of size tr closest to the current block's start. + if m.MinTime >= 0 { + t0 = tr * (m.MinTime / tr) + } else { + t0 = tr * ((m.MinTime - tr + 1) / tr) + } + + // Skip blocks that don't fall into the range. This can happen via mis-alignment or + // by being the multiple of the intended range. + if m.MaxTime > t0+tr { + i++ + continue + } + + // Add all metas to the current group that are within [t0, t0+tr]. + for ; i < len(metasByMinTime); i++ { + // Either the block falls into the next range or doesn't fit at all (checked above). + if metasByMinTime[i].MaxTime > t0+tr { + break + } + group = append(group, metasByMinTime[i]) + } + + if len(group) > 0 { + splitDirs = append(splitDirs, group) + } + } + + return splitDirs +} + +type largeTotalIndexSizeFilter struct { + *tsdbBasedPlanner + + bkt objstore.Bucket + markedForNoCompact prometheus.Counter + totalMaxIndexSizeBytes int64 +} + +var _ Planner = &largeTotalIndexSizeFilter{} + +// WithLargeTotalIndexSizeFilter wraps Planner with largeTotalIndexSizeFilter that checks the given plans and estimates total index size. +// When found, it marks block for no compaction by placing no-compact.json and updating cache. 
+// NOTE: The estimation is very rough as it assumes extreme cases of indexes sharing no bytes, thus summing all source index sizes. +// Adjust limit accordingly reducing to some % of actual limit you want to give. +// TODO(bwplotka): This is short term fix for https://github.com/thanos-io/thanos/issues/1424, replace with vertical block sharding https://github.com/thanos-io/thanos/pull/3390. +func WithLargeTotalIndexSizeFilter(with *tsdbBasedPlanner, bkt objstore.Bucket, totalMaxIndexSizeBytes int64, markedForNoCompact prometheus.Counter) *largeTotalIndexSizeFilter { + return &largeTotalIndexSizeFilter{tsdbBasedPlanner: with, bkt: bkt, totalMaxIndexSizeBytes: totalMaxIndexSizeBytes, markedForNoCompact: markedForNoCompact} +} + +func (t *largeTotalIndexSizeFilter) Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + noCompactMarked := t.noCompBlocksFunc() + copiedNoCompactMarked := make(map[ulid.ULID]*metadata.NoCompactMark, len(noCompactMarked)) + for k, v := range noCompactMarked { + copiedNoCompactMarked[k] = v + } + +PlanLoop: + for { + plan, err := t.plan(copiedNoCompactMarked, metasByMinTime) + if err != nil { + return nil, err + } + var totalIndexBytes, maxIndexSize int64 = 0, math.MinInt64 + var biggestIndex int + for i, p := range plan { + indexSize := int64(-1) + for _, f := range p.Thanos.Files { + if f.RelPath == block.IndexFilename { + indexSize = f.SizeBytes + } + } + if indexSize <= 0 { + // Get size from bkt instead. + attr, err := t.bkt.Attributes(ctx, filepath.Join(p.ULID.String(), block.IndexFilename)) + if err != nil { + return nil, errors.Wrapf(err, "get attr of %v", filepath.Join(p.ULID.String(), block.IndexFilename)) + } + indexSize = attr.Size + } + + if maxIndexSize < indexSize { + maxIndexSize = indexSize + biggestIndex = i + } + totalIndexBytes += indexSize + if totalIndexBytes >= t.totalMaxIndexSizeBytes { + // Marking blocks for no compact to limit size. + // TODO(bwplotka): Make sure to reset cache once this is done: https://github.com/thanos-io/thanos/issues/3408 + if err := block.MarkForNoCompact( + ctx, + t.logger, + t.bkt, + plan[biggestIndex].ULID, + metadata.IndexSizeExceedingNoCompactReason, + fmt.Sprintf("largeTotalIndexSizeFilter: Total compacted block's index size could exceed: %v with this block. See https://github.com/thanos-io/thanos/issues/1424", t.totalMaxIndexSizeBytes), + t.markedForNoCompact, + ); err != nil { + return nil, errors.Wrapf(err, "mark %v for no compaction", plan[biggestIndex].ULID.String()) + } + // Make sure wrapped planner exclude this block. + copiedNoCompactMarked[plan[biggestIndex].ULID] = &metadata.NoCompactMark{ID: plan[biggestIndex].ULID, Version: metadata.NoCompactMarkVersion1} + continue PlanLoop + } + } + // Planned blocks should not exceed limit. 
+ return plan, nil + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/retention.go b/vendor/github.com/thanos-io/thanos/pkg/compact/retention.go index 8d1ba7d5fb7..703bad5dda4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/retention.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/retention.go @@ -5,6 +5,7 @@ package compact import ( "context" + "fmt" "time" "github.com/go-kit/kit/log" @@ -37,7 +38,7 @@ func ApplyRetentionPolicyByResolution( maxTime := time.Unix(m.MaxTime/1000, 0) if time.Now().After(maxTime.Add(retentionDuration)) { level.Info(logger).Log("msg", "applying retention: marking block for deletion", "id", id, "maxTime", maxTime.String()) - if err := block.MarkForDeletion(ctx, logger, bkt, id, blocksMarkedForDeletion); err != nil { + if err := block.MarkForDeletion(ctx, logger, bkt, id, fmt.Sprintf("block exceeding retention of %v", retentionDuration), blocksMarkedForDeletion); err != nil { return errors.Wrap(err, "delete block") } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/component/component.go b/vendor/github.com/thanos-io/thanos/pkg/component/component.go index c7451c4767f..a418b9461fc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/component/component.go +++ b/vendor/github.com/thanos-io/thanos/pkg/component/component.go @@ -91,6 +91,7 @@ func FromProto(storeType storepb.StoreType) StoreAPI { var ( Bucket = source{component: component{name: "bucket"}} Cleanup = source{component: component{name: "cleanup"}} + Mark = source{component: component{name: "mark"}} Compact = source{component: component{name: "compact"}} Downsample = source{component: component{name: "downsample"}} Replicate = source{component: component{name: "replicate"}} diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index ab6c15c6f88..c20e5b162b2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "net/url" @@ -97,14 +98,26 @@ func NewWithTracingClient(logger log.Logger, userAgent string) *Client { ) } -func (c *Client) get2xx(ctx context.Context, u *url.URL) (_ []byte, _ int, err error) { - req, err := http.NewRequest(http.MethodGet, u.String(), nil) +// req2xx sends a request to the given url.URL. If method is http.MethodPost then +// the raw query is encoded in the body and the appropriate Content-Type is set. 
+func (c *Client) req2xx(ctx context.Context, u *url.URL, method string) (_ []byte, _ int, err error) { + var b io.Reader + if method == http.MethodPost { + rq := u.RawQuery + b = strings.NewReader(rq) + u.RawQuery = "" + } + + req, err := http.NewRequest(method, u.String(), b) if err != nil { return nil, 0, errors.Wrap(err, "create GET request") } if c.userAgent != "" { req.Header.Set("User-Agent", c.userAgent) } + if method == http.MethodPost { + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + } resp, err := c.Do(req.WithContext(ctx)) if err != nil { @@ -148,7 +161,7 @@ func (c *Client) ExternalLabels(ctx context.Context, base *url.URL) (labels.Labe span, ctx := tracing.StartSpan(ctx, "/prom_config HTTP[client]") defer span.Finish() - body, _, err := c.get2xx(ctx, &u) + body, _, err := c.req2xx(ctx, &u, http.MethodGet) if err != nil { return nil, err } @@ -339,6 +352,7 @@ func (c *Client) Snapshot(ctx context.Context, base *url.URL, skipHead bool) (st type QueryOptions struct { Deduplicate bool PartialResponseStrategy storepb.PartialResponseStrategy + Method string } func (p *QueryOptions) AddTo(values url.Values) error { @@ -381,7 +395,12 @@ func (c *Client) QueryInstant(ctx context.Context, base *url.URL, query string, span, ctx := tracing.StartSpan(ctx, "/prom_query_instant HTTP[client]") defer span.Finish() - body, _, err := c.get2xx(ctx, &u) + method := opts.Method + if method == "" { + method = http.MethodGet + } + + body, _, err := c.req2xx(ctx, &u, method) if err != nil { return nil, nil, errors.Wrap(err, "read query instant response") } @@ -483,7 +502,7 @@ func (c *Client) QueryRange(ctx context.Context, base *url.URL, query string, st span, ctx := tracing.StartSpan(ctx, "/prom_query_range HTTP[client]") defer span.Finish() - body, _, err := c.get2xx(ctx, &u) + body, _, err := c.req2xx(ctx, &u, http.MethodGet) if err != nil { return nil, nil, errors.Wrap(err, "read query range response") } @@ -565,7 +584,7 @@ func (c *Client) AlertmanagerAlerts(ctx context.Context, base *url.URL) ([]*mode span, ctx := tracing.StartSpan(ctx, "/alertmanager_alerts HTTP[client]") defer span.Finish() - body, _, err := c.get2xx(ctx, &u) + body, _, err := c.req2xx(ctx, &u, http.MethodGet) if err != nil { return nil, err } @@ -592,7 +611,7 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string span, ctx := tracing.StartSpan(ctx, spanName) defer span.Finish() - body, code, err := c.get2xx(ctx, u) + body, code, err := c.req2xx(ctx, u, http.MethodGet) if err != nil { if code, exists := statusToCode[code]; exists && code != 0 { return status.Error(code, err.Error()) diff --git a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go index 06b8391e3f5..d3f80c3d59b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go +++ b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go @@ -360,7 +360,7 @@ func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error { } meta.Thanos.Source = s.source meta.Thanos.SegmentFiles = block.GetSegmentFiles(updir) - if err := metadata.Write(s.logger, updir, meta); err != nil { + if err := meta.WriteToDir(s.logger, updir); err != nil { return errors.Wrap(err, "write meta file") } return block.Upload(ctx, s.logger, s.bucket, updir) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 6ec95eed57a..6bcead6b9b4 100644 --- 
a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -40,6 +40,7 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/component" + "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/gate" "github.com/thanos-io/thanos/pkg/model" "github.com/thanos-io/thanos/pkg/objstore" @@ -247,13 +248,14 @@ type FilterConfig struct { // BucketStore implements the store API backed by a bucket. It loads all index // files to local disk. type BucketStore struct { - logger log.Logger - metrics *bucketStoreMetrics - bkt objstore.InstrumentedBucketReader - fetcher block.MetadataFetcher - dir string - indexCache storecache.IndexCache - chunkPool pool.BytesPool + logger log.Logger + metrics *bucketStoreMetrics + bkt objstore.InstrumentedBucketReader + fetcher block.MetadataFetcher + dir string + indexCache storecache.IndexCache + indexReaderPool *indexheader.ReaderPool + chunkPool pool.BytesPool // Sets of blocks that have the same labels. They are indexed by a hash over their label set. mtx sync.RWMutex @@ -305,6 +307,8 @@ func NewBucketStore( enablePostingsCompression bool, postingOffsetsInMemSampling int, enableSeriesResponseHints bool, // TODO(pracucci) Thanos 0.12 and below doesn't gracefully handle new fields in SeriesResponse. Drop this flag and always enable hints once we can drop backward compatibility. + lazyIndexReaderEnabled bool, + lazyIndexReaderIdleTimeout time.Duration, ) (*BucketStore, error) { if logger == nil { logger = log.NewNopLogger() @@ -321,6 +325,7 @@ func NewBucketStore( fetcher: fetcher, dir: dir, indexCache: indexCache, + indexReaderPool: indexheader.NewReaderPool(logger, lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, extprom.WrapRegistererWithPrefix("thanos_bucket_store_", reg)), chunkPool: chunkPool, blocks: map[ulid.ULID]*bucketBlock{}, blockSets: map[uint64]*bucketBlockSet{}, @@ -352,6 +357,8 @@ func (s *BucketStore) Close() (err error) { for _, b := range s.blocks { runutil.CloseWithErrCapture(&err, b, "closing Bucket Block") } + + s.indexReaderPool.Close() return err } @@ -484,7 +491,7 @@ func (s *BucketStore) addBlock(ctx context.Context, meta *metadata.Meta) (err er lset := labels.FromMap(meta.Thanos.Labels) h := lset.Hash() - indexHeaderReader, err := indexheader.NewBinaryReader( + indexHeaderReader, err := s.indexReaderPool.NewBinaryReader( ctx, s.logger, s.bkt, @@ -704,50 +711,31 @@ func blockSeries( chks []chunks.Meta ) for _, id := range ps { - if err := indexr.LoadedSeries(id, &lset, &chks); err != nil { + if err := indexr.LoadedSeries(id, &lset, &chks, req); err != nil { return nil, nil, errors.Wrap(err, "read series") } - s := seriesEntry{lset: make(labels.Labels, 0, len(lset)+len(extLset))} - if !req.SkipChunks { - s.refs = make([]uint64, 0, len(chks)) - s.chks = make([]storepb.AggrChunk, 0, len(chks)) - } - - // hasValidChunk is used to check whether there is at least one chunk in the required time range. - var hasValidChunk bool - for _, meta := range chks { - if meta.MaxTime < req.MinTime { - continue - } - if meta.MinTime > req.MaxTime { - break - } - - // Fast path for no chunks series. 
- if req.SkipChunks { - hasValidChunk = true - break - } - - if err := chunkr.addPreload(meta.Ref); err != nil { - return nil, nil, errors.Wrap(err, "add chunk preload") - } - s.chks = append(s.chks, storepb.AggrChunk{ - MinTime: meta.MinTime, - MaxTime: meta.MaxTime, - }) - s.refs = append(s.refs, meta.Ref) - } + if len(chks) > 0 { + s := seriesEntry{lset: make(labels.Labels, 0, len(lset)+len(extLset))} + if !req.SkipChunks { + s.refs = make([]uint64, 0, len(chks)) + s.chks = make([]storepb.AggrChunk, 0, len(chks)) + for _, meta := range chks { + if err := chunkr.addPreload(meta.Ref); err != nil { + return nil, nil, errors.Wrap(err, "add chunk preload") + } + s.chks = append(s.chks, storepb.AggrChunk{ + MinTime: meta.MinTime, + MaxTime: meta.MaxTime, + }) + s.refs = append(s.refs, meta.Ref) + } - // Reserve chunksLimiter if we save chunks. - if len(s.chks) > 0 { - hasValidChunk = true - if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { - return nil, nil, errors.Wrap(err, "exceeded chunks limit") + // Reserve chunksLimiter if we save chunks. + if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { + return nil, nil, errors.Wrap(err, "exceeded chunks limit") + } } - } - if hasValidChunk { for _, l := range lset { // Skip if the external labels of the block overrule the series' label. // NOTE(fabxc): maybe move it to a prefixed version to still ensure uniqueness of series? @@ -1094,7 +1082,11 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq defer runutil.CloseWithLogOnErr(s.logger, indexr, "label names") // Do it via index reader to have pending reader registered correctly. - res := indexr.block.indexHeaderReader.LabelNames() + res, err := indexr.block.indexHeaderReader.LabelNames() + if err != nil { + return errors.Wrap(err, "label names") + } + sort.Strings(res) mtx.Lock() @@ -1574,7 +1566,11 @@ func (r *bucketIndexReader) ExpandedPostings(ms []*labels.Matcher) ([]uint64, er // As of version two all series entries are 16 byte padded. All references // we get have to account for that to get the correct offset. - if r.block.indexHeaderReader.IndexVersion() >= 2 { + version, err := r.block.indexHeaderReader.IndexVersion() + if err != nil { + return nil, errors.Wrap(err, "get index version") + } + if version >= 2 { for i, id := range ps { ps[i] = id * 16 } @@ -2009,7 +2005,8 @@ func (g gapBasedPartitioner) Partition(length int, rng func(int) (uint64, uint64 // LoadedSeries populates the given labels and chunk metas for the series identified // by the reference. // Returns ErrNotFound if the ref does not resolve to a known series. -func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { +func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks *[]chunks.Meta, + req *storepb.SeriesRequest) error { b, ok := r.loadedSeries[ref] if !ok { return errors.Errorf("series %d not found", ref) @@ -2018,7 +2015,7 @@ func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks * r.stats.seriesTouched++ r.stats.seriesTouchedSizeSum += len(b) - return r.dec.Series(b, lset, chks) + return r.decodeSeriesWithReq(b, lset, chks, req) } // Close released the underlying resources of the reader. @@ -2027,6 +2024,95 @@ func (r *bucketIndexReader) Close() error { return nil } +// decodeSeriesWithReq decodes a series entry from the given byte slice based on the SeriesRequest. 
+func (r *bucketIndexReader) decodeSeriesWithReq(b []byte, lbls *labels.Labels, chks *[]chunks.Meta, + req *storepb.SeriesRequest) error { + *lbls = (*lbls)[:0] + *chks = (*chks)[:0] + + d := encoding.Decbuf{B: b} + + k := d.Uvarint() + + for i := 0; i < k; i++ { + lno := uint32(d.Uvarint()) + lvo := uint32(d.Uvarint()) + + if d.Err() != nil { + return errors.Wrap(d.Err(), "read series label offsets") + } + + ln, err := r.dec.LookupSymbol(lno) + if err != nil { + return errors.Wrap(err, "lookup label name") + } + lv, err := r.dec.LookupSymbol(lvo) + if err != nil { + return errors.Wrap(err, "lookup label value") + } + + *lbls = append(*lbls, labels.Label{Name: ln, Value: lv}) + } + + // Read the chunks meta data. + k = d.Uvarint() + + if k == 0 { + return nil + } + + t0 := d.Varint64() + maxt := int64(d.Uvarint64()) + t0 + ref0 := int64(d.Uvarint64()) + + // No chunk in the required time range. + if t0 > req.MaxTime { + return nil + } + + if req.MinTime <= maxt { + *chks = append(*chks, chunks.Meta{ + Ref: uint64(ref0), + MinTime: t0, + MaxTime: maxt, + }) + // Get a valid chunk, return if it is a skip chunk request. + if req.SkipChunks { + return nil + } + } + t0 = maxt + + for i := 1; i < k; i++ { + mint := int64(d.Uvarint64()) + t0 + maxt := int64(d.Uvarint64()) + mint + ref0 += d.Varint64() + t0 = maxt + + if maxt < req.MinTime { + continue + } + if mint > req.MaxTime { + break + } + + if d.Err() != nil { + return errors.Wrapf(d.Err(), "read meta for chunk %d", i) + } + + *chks = append(*chks, chunks.Meta{ + Ref: uint64(ref0), + MinTime: mint, + MaxTime: maxt, + }) + + if req.SkipChunks { + return nil + } + } + return d.Err() +} + type bucketChunkReader struct { ctx context.Context block *bucketBlock diff --git a/vendor/modules.txt b/vendor/modules.txt index 03eb438514c..5fec2c957f5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -339,7 +339,8 @@ github.com/julienschmidt/httprouter github.com/klauspost/cpuid # github.com/konsorten/go-windows-terminal-sequences v1.0.3 github.com/konsorten/go-windows-terminal-sequences -# github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939 +# github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 +## explicit github.com/lann/builder # github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 github.com/lann/ps @@ -537,7 +538,7 @@ github.com/sean-/seed # github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e ## explicit github.com/segmentio/fasthash/fnv1a -# github.com/sercand/kuberesolver v2.4.0+incompatible +# github.com/sercand/kuberesolver v2.4.0+incompatible => github.com/sercand/kuberesolver v2.4.0+incompatible github.com/sercand/kuberesolver # github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/httpfs/filter @@ -565,7 +566,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52 +# github.com/thanos-io/thanos v0.13.1-0.20201112171553-05fbe15616c7 ## explicit github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -1026,5 +1027,6 @@ sigs.k8s.io/yaml # github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 # google.golang.org/grpc => google.golang.org/grpc v1.29.1 +# github.com/sercand/kuberesolver => github.com/sercand/kuberesolver v2.4.0+incompatible # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache 
v0.0.0-20180902122335-24332e2d58ab # github.com/opentracing-contrib/go-grpc => github.com/pracucci/go-grpc v0.0.0-20201022134131-ef559b8db645
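
For orientation, the sketch below (not part of the patch) shows roughly how the pieces introduced in the compact package hunks fit together on the caller side: a GatherNoCompactionMarkFilter feeds the new Planner, which can be wrapped by WithLargeTotalIndexSizeFilter, and the planner is then passed to NewBucketCompactor alongside the Compactor. The helper name, its parameter list, and the 64<<30 index-size budget are illustrative assumptions; only the compact.* constructors and their argument order are taken from the hunks above, and NewBucketCompactor is assumed to return an error alongside the compactor.

package compactexample

import (
	"context"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/thanos/pkg/compact"
	"github.com/thanos-io/thanos/pkg/objstore"
)

// buildAndRunCompaction is an illustrative helper (hypothetical name) wiring the new Planner into
// BucketCompactor. The Syncer, Grouper and Compactor values are taken as arguments because their
// construction is outside the scope of this patch.
func buildAndRunCompaction(
	ctx context.Context,
	logger log.Logger,
	bkt objstore.Bucket,
	instrBkt objstore.InstrumentedBucketReader,
	syncer *compact.Syncer,
	grouper compact.Grouper,
	comp compact.Compactor,
	ranges []int64,
	markedForNoCompact prometheus.Counter,
	compactDir string,
	concurrency int,
) error {
	// Gathers no-compact-mark.json markers; it is also expected to be registered as a
	// block.MetadataFilter on the metadata fetcher so the map is populated before planning.
	noCompactFilter := compact.NewGatherNoCompactionMarkFilter(logger, instrBkt)

	// TSDB-style planner that skips marked blocks, wrapped with the total-index-size guard.
	planner := compact.WithLargeTotalIndexSizeFilter(
		compact.NewPlanner(logger, ranges, noCompactFilter),
		bkt,
		64<<30, // assumed budget; the filter over-estimates, so stay below the real 64GiB index limit
		markedForNoCompact,
	)

	// Planner and Compactor are now passed separately (previously a single tsdb.Compactor).
	bucketCompactor, err := compact.NewBucketCompactor(logger, syncer, grouper, planner, comp, compactDir, bkt, concurrency)
	if err != nil {
		return err
	}
	return bucketCompactor.Compact(ctx)
}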