@@ -35,6 +35,7 @@ import (
 
 	"github.com/prometheus-community/parquet-common/convert"
 	"github.com/prometheus-community/parquet-common/schema"
+	"github.com/prometheus-community/parquet-common/search"
 	"github.com/prometheus-community/parquet-common/storage"
 	"github.com/prometheus-community/parquet-common/util"
 )
@@ -269,6 +270,126 @@ func TestQueryable(t *testing.T) {
 					require.Equal(t, expectedLabelValues, lValues)
 				})
 			})
+
+			t.Run("RowCountQuota", func(t *testing.T) {
+				// Test with limited row count quota
+				limitedRowQuota := func(ctx context.Context) int64 {
+					return 10 // Only allow 10 rows
+				}
+				queryable, err := createQueryable(shard, WithRowCountLimitFunc(limitedRowQuota))
+				require.NoError(t, err)
+				querier, err := queryable.Querier(data.MinTime, data.MaxTime)
+				require.NoError(t, err)
+
+				// Try to query more rows than the quota allows
+				matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "unique", "unique_0")}
+				ss := querier.Select(ctx, true, nil, matchers...)
+
+				// This should fail due to the row count quota
+				for ss.Next() {
+					_ = ss.At()
+				}
+				require.Error(t, ss.Err())
+				require.Contains(t, ss.Err().Error(), "would fetch too many rows")
+				require.True(t, search.IsResourceExhausted(ss.Err()))
+
+				// Test with sufficient quota
+				sufficientRowQuota := func(ctx context.Context) int64 {
+					return 1000 // Allow 1000 rows
+				}
+				queryable, err = createQueryable(shard, WithRowCountLimitFunc(sufficientRowQuota))
+				require.NoError(t, err)
+				querier, err = queryable.Querier(data.MinTime, data.MaxTime)
+				require.NoError(t, err)
+
+				ss = querier.Select(ctx, true, nil, matchers...)
+				var series []prom_storage.Series
+				for ss.Next() {
+					series = append(series, ss.At())
+				}
+				require.NoError(t, ss.Err())
+				require.NotEmpty(t, series)
+			})
+
+			t.Run("ChunkBytesQuota", func(t *testing.T) {
+				// Test with limited chunk bytes quota
+				limitedChunkQuota := func(ctx context.Context) int64 {
+					return 100 // Only allow 100 bytes
+				}
+				queryable, err := createQueryable(shard, WithChunkBytesLimitFunc(limitedChunkQuota))
+				require.NoError(t, err)
+				querier, err := queryable.Querier(data.MinTime, data.MaxTime)
+				require.NoError(t, err)
+
+				// Try to query chunks that exceed the quota
+				matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "unique", "unique_0")}
+				ss := querier.Select(ctx, true, nil, matchers...)
+
+				// This should fail due to the chunk bytes quota
+				for ss.Next() {
+					_ = ss.At()
+				}
+				require.Error(t, ss.Err())
+				require.Contains(t, ss.Err().Error(), "would fetch too many chunk bytes")
+				require.True(t, search.IsResourceExhausted(ss.Err()))
+
+				// Test with sufficient quota
+				sufficientChunkQuota := func(ctx context.Context) int64 {
+					return 1000000 // Allow 1MB
+				}
+				queryable, err = createQueryable(shard, WithChunkBytesLimitFunc(sufficientChunkQuota))
+				require.NoError(t, err)
+				querier, err = queryable.Querier(data.MinTime, data.MaxTime)
+				require.NoError(t, err)
+
+				ss = querier.Select(ctx, true, nil, matchers...)
+				var series []prom_storage.Series
+				for ss.Next() {
+					series = append(series, ss.At())
+				}
+				require.NoError(t, ss.Err())
+				require.NotEmpty(t, series)
+			})
+
+			t.Run("DataBytesQuota", func(t *testing.T) {
+				// Test with limited data bytes quota
+				limitedDataQuota := func(ctx context.Context) int64 {
+					return 100 // Only allow 100 bytes
+				}
+				queryable, err := createQueryable(shard, WithDataBytesLimitFunc(limitedDataQuota))
+				require.NoError(t, err)
+				querier, err := queryable.Querier(data.MinTime, data.MaxTime)
+				require.NoError(t, err)
+
+				// Try to query data that exceeds the quota
+				matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "unique", "unique_0")}
+				ss := querier.Select(ctx, true, nil, matchers...)
+
+				// This should fail due to the data bytes quota
+				for ss.Next() {
+					_ = ss.At()
+				}
+				require.Error(t, ss.Err())
+				require.Contains(t, ss.Err().Error(), "would fetch too many data bytes")
+				require.True(t, search.IsResourceExhausted(ss.Err()))
+
+				// Test with sufficient quota
+				sufficientDataQuota := func(ctx context.Context) int64 {
+					return 1000000 // Allow 1MB
+				}
+				queryable, err = createQueryable(shard, WithDataBytesLimitFunc(sufficientDataQuota))
+				require.NoError(t, err)
+				querier, err = queryable.Querier(data.MinTime, data.MaxTime)
+				require.NoError(t, err)
+
+				ss = querier.Select(ctx, true, nil, matchers...)
+				var series []prom_storage.Series
+				for ss.Next() {
+					series = append(series, ss.At())
+				}
+				require.NoError(t, ss.Err())
+				require.NotEmpty(t, series)
+			})
 		})
 	}
 }
@@ -338,11 +459,11 @@ func queryWithQueryable(t *testing.T, mint, maxt int64, shard storage.ParquetShard
 	return found
 }
 
-func createQueryable(shard storage.ParquetShard) (prom_storage.Queryable, error) {
+func createQueryable(shard storage.ParquetShard, opts ...QueryableOpts) (prom_storage.Queryable, error) {
 	d := schema.NewPrometheusParquetChunksDecoder(chunkenc.NewPool())
 	return NewParquetQueryable(d, func(ctx context.Context, mint, maxt int64) ([]storage.ParquetShard, error) {
 		return []storage.ParquetShard{shard}, nil
-	})
+	}, opts...)
 }
 
 var benchmarkCases = []struct {
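Usage note (not part of the diff): the new tests exercise the WithRowCountLimitFunc, WithChunkBytesLimitFunc, and WithDataBytesLimitFunc options end to end. The sketch below shows how a caller might wire these per-query quotas when constructing a queryable, following the pattern in createQueryable above. The newLimitedQueryable name, the shardFinder parameter, and the quota values are illustrative assumptions; the option and constructor names come from the diff itself.

// Hypothetical wiring of the new per-query quotas. Each limit func receives
// the query context, so per-tenant limits could be derived from it.
func newLimitedQueryable(shardFinder func(ctx context.Context, mint, maxt int64) ([]storage.ParquetShard, error)) (prom_storage.Queryable, error) {
	d := schema.NewPrometheusParquetChunksDecoder(chunkenc.NewPool())
	return NewParquetQueryable(d, shardFinder,
		WithRowCountLimitFunc(func(ctx context.Context) int64 { return 100_000 }),    // max rows fetched per query (illustrative)
		WithChunkBytesLimitFunc(func(ctx context.Context) int64 { return 64 << 20 }), // max chunk bytes per query (illustrative)
		WithDataBytesLimitFunc(func(ctx context.Context) int64 { return 128 << 20 }), // max data bytes per query (illustrative)
	)
}

When a quota is exceeded, the error is surfaced through the SeriesSet's Err() after iteration, and callers can distinguish it from other failures with search.IsResourceExhausted(err), as the new tests do.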