@@ -101,7 +101,7 @@ func (st *acceptanceTestStorage) Querier(from, to int64) (prom_storage.Querier,
101101
102102 h := st .st .Head ()
103103 data := util.TestData {MinTime : h .MinTime (), MaxTime : h .MaxTime ()}
104- block := convertToParquet (st .t , context .Background (), bkt , data , h )
104+ block := convertToParquet (st .t , context .Background (), bkt , data , h , nil )
105105
106106 q , err := createQueryable (block )
107107 if err != nil {
@@ -163,25 +163,37 @@ func TestQueryable(t *testing.T) {
163163 require .NoError (t , err )
164164
165165 testCases := map [string ]struct {
166- ops []storage.FileOption
166+ storageOpts []storage.FileOption
167+ convertOpts []convert.ConvertOption
167168 }{
168169 "default" : {
169- ops : []storage.FileOption {},
170+ storageOpts : []storage.FileOption {},
171+ convertOpts : defaultConvertOpts ,
170172 },
171173 "skipBloomFilters" : {
172- ops : []storage.FileOption {
174+ storageOpts : []storage.FileOption {
173175 storage .WithFileOptions (
174176 parquet .SkipBloomFilters (true ),
175177 parquet .OptimisticRead (true ),
176178 ),
177179 },
180+ convertOpts : defaultConvertOpts ,
181+ },
182+ "multipleSortingColumns" : {
183+ storageOpts : []storage.FileOption {},
184+ convertOpts : []convert.ConvertOption {
185+ convert .WithName ("shard" ),
186+ convert .WithColDuration (time .Hour ),
187+ convert .WithRowGroupSize (500 ),
188+ convert .WithPageBufferSize (300 ),
189+ convert .WithSortBy (fmt .Sprintf ("%s,%s" , labels .MetricName , "label_name_1" )),
190+ },
178191 },
179192 }
180193
181194 for n , tc := range testCases {
182195 t .Run (n , func (t * testing.T ) {
183- // Convert to Parquet
184- shard := convertToParquet (t , ctx , bkt , data , st .Head (), tc .ops ... )
196+ shard := convertToParquet (t , ctx , bkt , data , st .Head (), tc .convertOpts , tc .storageOpts ... )
185197
186198 t .Run ("QueryByUniqueLabel" , func (t * testing.T ) {
187199 matchers := []* labels.Matcher {labels .MustNewMatcher (labels .MatchEqual , "unique" , "unique_0" )}
@@ -210,6 +222,25 @@ func TestQueryable(t *testing.T) {
210222 }
211223 })
212224
225+ t .Run ("QueryByMultipleLabels" , func (t * testing.T ) {
226+ for i := 0 ; i < 50 ; i ++ {
227+ name := fmt .Sprintf ("metric_%d" , rand .Int ()% cfg .TotalMetricNames )
228+ matchers := []* labels.Matcher {
229+ labels .MustNewMatcher (labels .MatchEqual , labels .MetricName , name ),
230+ labels .MustNewMatcher (labels .MatchEqual , "label_name_1" , "label_value_1" ),
231+ }
232+ sFound := queryWithQueryable (t , data .MinTime , data .MaxTime , shard , nil , matchers ... )
233+ totalFound := 0
234+ for _ , series := range sFound {
235+ totalFound ++
236+ require .Equal (t , series .Labels ().Get (labels .MetricName ), name )
237+ require .Equal (t , series .Labels ().Get ("label_name_1" ), "label_value_1" )
238+ require .Contains (t , data .SeriesHash , series .Labels ().Hash ())
239+ }
240+ require .Equal (t , cfg .MetricsPerMetricName , totalFound )
241+ }
242+ })
243+
213244 t .Run ("QueryByUniqueLabel and SkipChunks=true" , func (t * testing.T ) {
214245 matchers := []* labels.Matcher {labels .MustNewMatcher (labels .MatchEqual , "unique" , "unique_0" )}
215246 hints := & prom_storage.SelectHints {
@@ -718,7 +749,7 @@ func BenchmarkSelect(b *testing.B) {
718749
719750 cbkt := newCountingBucket (bkt )
720751 data := util.TestData {MinTime : h .MinTime (), MaxTime : h .MaxTime ()}
721- block := convertToParquetForBenchWithCountingBucket (b , ctx , bkt , cbkt , data , h )
752+ block := convertToParquetForBenchWithCountingBucket (b , ctx , bkt , cbkt , data , h , nil )
722753 queryable , err := createQueryable (block )
723754 require .NoError (b , err , "unable to create queryable" )
724755
@@ -754,18 +785,25 @@ func BenchmarkSelect(b *testing.B) {
754785 }
755786}
756787
757- func convertToParquet (t * testing.T , ctx context.Context , bkt * bucket , data util.TestData , h convert.Convertible , opts ... storage.FileOption ) storage.ParquetShard {
758- colDuration := time .Hour
788+ var defaultConvertOpts = []convert.ConvertOption {
789+ convert .WithName ("shard" ),
790+ convert .WithColDuration (time .Hour ),
791+ convert .WithRowGroupSize (500 ),
792+ convert .WithPageBufferSize (300 ),
793+ }
794+
795+ func convertToParquet (t * testing.T , ctx context.Context , bkt * bucket , data util.TestData , h convert.Convertible , convertOpts []convert.ConvertOption , opts ... storage.FileOption ) storage.ParquetShard {
796+ if convertOpts == nil {
797+ convertOpts = defaultConvertOpts
798+ }
799+
759800 shards , err := convert .ConvertTSDBBlock (
760801 ctx ,
761802 bkt ,
762803 data .MinTime ,
763804 data .MaxTime ,
764805 []convert.Convertible {h },
765- convert .WithName ("shard" ),
766- convert .WithColDuration (colDuration ),
767- convert .WithRowGroupSize (500 ),
768- convert .WithPageBufferSize (300 ),
806+ convertOpts ... ,
769807 )
770808 if err != nil {
771809 t .Fatalf ("error converting to parquet: %v" , err )
@@ -785,18 +823,18 @@ func convertToParquet(t *testing.T, ctx context.Context, bkt *bucket, data util.
785823 return shard
786824}
787825
788- func convertToParquetForBenchWithCountingBucket (tb testing.TB , ctx context.Context , bkt * bucket , cbkt * countingBucket , data util.TestData , h convert.Convertible , opts ... storage.FileOption ) storage.ParquetShard {
789- colDuration := time .Hour
826+ func convertToParquetForBenchWithCountingBucket (tb testing.TB , ctx context.Context , bkt * bucket , cbkt * countingBucket , data util.TestData , h convert.Convertible , convertOpts []convert.ConvertOption , opts ... storage.FileOption ) storage.ParquetShard {
827+ if convertOpts == nil {
828+ convertOpts = defaultConvertOpts
829+ }
830+
790831 shards , err := convert .ConvertTSDBBlock (
791832 ctx ,
792833 bkt ,
793834 data .MinTime ,
794835 data .MaxTime ,
795836 []convert.Convertible {h },
796- convert .WithName ("shard" ),
797- convert .WithColDuration (colDuration ),
798- convert .WithRowGroupSize (500 ),
799- convert .WithPageBufferSize (300 ),
837+ convertOpts ... ,
800838 )
801839 if err != nil {
802840 tb .Fatalf ("error converting to parquet: %v" , err )