Commit e5a0a29

skip blocks with out-of-order chunk during compaction
Signed-off-by: Yang Hu <[email protected]>
1 parent: 0058003

7 files changed: +244 −33 lines changed
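
In short: a pre-compaction health check for out-of-order chunks is split out of the critical checks, the compactor wraps it in a typed error that carries the offending block's ULID, and instead of halting it marks that block with a new no-compact reason so the rest of the group keeps compacting. The sketch below is a hypothetical helper (the name `skipOutOfOrderBlock` and the detail string are illustrative; the real logic is inlined in `(*Group).compact` and `(*BucketCompactor).Compact` further down) showing how the new pieces are meant to fit together, written as if it lived in package `compact` so the wrapper's unexported `id` field is reachable.

```go
// Sketch only; not part of this commit.
package compact

import (
	"context"

	"github.com/go-kit/kit/log"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/thanos/pkg/block"
	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/objstore"
)

// skipOutOfOrderBlock marks the offending block no-compact so the rest of the
// group can still be compacted. It reports whether the caller should re-plan
// instead of failing the whole run.
func skipOutOfOrderBlock(ctx context.Context, logger log.Logger, bkt objstore.Bucket,
	markedForNoCompact prometheus.Counter, err error) bool {
	if !IsOutOfOrderChunkError(err) {
		return false
	}
	// The typed wrapper carries the ULID of the broken block, so we know exactly
	// which block to exclude from future planning.
	oooErr := errors.Cause(err).(OutOfOrderChunksError)
	if markErr := block.MarkForNoCompact(
		ctx, logger, bkt, oooErr.id,
		metadata.OutOfOrderChunksNoCompactReason,
		"block index contains out-of-order chunks; skipping it so compaction can continue",
		markedForNoCompact,
	); markErr != nil {
		return false // marking failed; surface the original error instead
	}
	return true
}
```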

cmd/thanos/compact.go

Lines changed: 1 addition & 0 deletions
@@ -349,6 +349,7 @@ func runCompact(
         reg,
         compactMetrics.blocksMarked.WithLabelValues(metadata.DeletionMarkFilename),
         compactMetrics.garbageCollectedBlocks,
+        compactMetrics.blocksMarked.WithLabelValues(metadata.NoCompactMarkFilename),
         metadata.HashFunc(conf.hashFunc),
     )
     planner := compact.WithLargeTotalIndexSizeFilter(

pkg/block/index.go

Lines changed: 13 additions & 6 deletions
@@ -111,12 +111,9 @@ func (i HealthStats) Issue347OutsideChunksErr() error {
     return nil
 }
 
-// CriticalErr returns error if stats indicates critical block issue, that might solved only by manual repair procedure.
-func (i HealthStats) CriticalErr() error {
-    var errMsg []string
-
-    if i.OutOfOrderSeries > 0 {
-        errMsg = append(errMsg, fmt.Sprintf(
+func (i HealthStats) OutOfOrderChunksErr() error {
+    if i.OutOfOrderChunks > 0 {
+        return errors.New(fmt.Sprintf(
             "%d/%d series have an average of %.3f out-of-order chunks: "+
                 "%.3f of these are exact duplicates (in terms of data and time range)",
             i.OutOfOrderSeries,
@@ -125,6 +122,12 @@ func (i HealthStats) CriticalErr() error {
             float64(i.DuplicatedChunks)/float64(i.OutOfOrderChunks),
         ))
     }
+    return nil
+}
+
+// CriticalErr returns error if stats indicates critical block issue, that might solved only by manual repair procedure.
+func (i HealthStats) CriticalErr() error {
+    var errMsg []string
 
     n := i.OutsideChunks - (i.CompleteOutsideChunks + i.Issue347OutsideChunks)
     if n > 0 {
@@ -158,6 +161,10 @@ func (i HealthStats) AnyErr() error {
         errMsg = append(errMsg, err.Error())
     }
 
+    if err := i.OutOfOrderChunksErr(); err != nil {
+        errMsg = append(errMsg, err.Error())
+    }
+
     if len(errMsg) > 0 {
         return errors.New(strings.Join(errMsg, ", "))
     }
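
For orientation, this is roughly how the new accessor separates concerns for a caller: out-of-order chunks get their own error, distinct from CriticalErr, so the caller can choose to skip the block rather than require a manual repair. A minimal sketch, assuming a locally available block directory (the path is a placeholder) and the same GatherIndexHealthStats call shape used by the test in the next file:

```go
package main

import (
	"fmt"
	"math"
	"os"

	"github.com/go-kit/kit/log"

	"github.com/thanos-io/thanos/pkg/block"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	// Placeholder path to an already-downloaded block directory.
	indexPath := "./01EXAMPLEULID/index"

	stats, err := block.GatherIndexHealthStats(logger, indexPath, 0, math.MaxInt64)
	if err != nil {
		panic(err)
	}

	// Out-of-order chunks now have their own accessor, so a caller can skip the
	// block (mark it no-compact) instead of treating it as a repair-only failure.
	if oooErr := stats.OutOfOrderChunksErr(); oooErr != nil {
		fmt.Println("exclude block from compaction:", oooErr)
	}
	if critErr := stats.CriticalErr(); critErr != nil {
		fmt.Println("block needs manual repair:", critErr)
	}
}
```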

pkg/block/index_test.go

Lines changed: 12 additions & 0 deletions
@@ -6,6 +6,7 @@ package block
 import (
     "context"
     "io/ioutil"
+    "math"
     "os"
     "path/filepath"
     "testing"
@@ -83,5 +84,16 @@ func TestRewrite(t *testing.T) {
         testutil.Ok(t, ir2.Series(p.At(), &lset, &chks))
         testutil.Equals(t, 1, len(chks))
     }
+}
+
+func TestGatherIndexHealthStatsReturnsOutOfOrderChunksErr(t *testing.T) {
+    blockDir := "../testutil/testdata/test-for-out-of-order-chunk"
+    err := testutil.PutOutOfOrderIndex(blockDir, 0, math.MaxInt64)
+    testutil.Ok(t, err)
 
+    stats, err := GatherIndexHealthStats(log.NewLogfmtLogger(os.Stderr), blockDir+"/"+IndexFilename, 0, math.MaxInt64)
+
+    testutil.Ok(t, err)
+    testutil.Equals(t, 1, stats.OutOfOrderChunks)
+    testutil.NotOk(t, stats.OutOfOrderChunksErr())
 }

pkg/block/metadata/markers.go

Lines changed: 2 additions & 0 deletions
@@ -67,6 +67,8 @@ const (
     // IndexSizeExceedingNoCompactReason is a reason of index being too big (for example exceeding 64GB limit: https://github.com/thanos-io/thanos/issues/1424)
     // This reason can be ignored when vertical block sharding will be implemented.
     IndexSizeExceedingNoCompactReason = "index-size-exceeding"
+    // OutOfOrderChunksNoCompactReason is a reason for skipping compaction of a block whose index contains out-of-order chunks, so that compaction is not blocked.
+    OutOfOrderChunksNoCompactReason = "block-index-out-of-order-chunk"
 )
 
 // NoCompactMark marker stores reason of block being excluded from compaction if needed.

pkg/compact/compact.go

Lines changed: 52 additions & 3 deletions
@@ -240,6 +240,7 @@ type DefaultGrouper struct
     verticalCompactions *prometheus.CounterVec
     garbageCollectedBlocks prometheus.Counter
     blocksMarkedForDeletion prometheus.Counter
+    blocksMarkedForNoCompact prometheus.Counter
     hashFunc metadata.HashFunc
 }
 
@@ -252,6 +253,7 @@ func NewDefaultGrouper(
     reg prometheus.Registerer,
     blocksMarkedForDeletion prometheus.Counter,
     garbageCollectedBlocks prometheus.Counter,
+    blocksMarkedForNoCompact prometheus.Counter,
     hashFunc metadata.HashFunc,
 ) *DefaultGrouper {
     return &DefaultGrouper{
@@ -279,9 +281,10 @@
             Name: "thanos_compact_group_vertical_compactions_total",
             Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.",
         }, []string{"group"}),
-        garbageCollectedBlocks: garbageCollectedBlocks,
-        blocksMarkedForDeletion: blocksMarkedForDeletion,
-        hashFunc: hashFunc,
+        blocksMarkedForNoCompact: blocksMarkedForNoCompact,
+        garbageCollectedBlocks: garbageCollectedBlocks,
+        blocksMarkedForDeletion: blocksMarkedForDeletion,
+        hashFunc: hashFunc,
     }
 }
 
@@ -309,6 +312,7 @@ func (g *DefaultGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*Gro
         g.verticalCompactions.WithLabelValues(groupKey),
         g.garbageCollectedBlocks,
         g.blocksMarkedForDeletion,
+        g.blocksMarkedForNoCompact,
         g.hashFunc,
     )
     if err != nil {
@@ -346,6 +350,7 @@ type Group struct {
     verticalCompactions prometheus.Counter
     groupGarbageCollectedBlocks prometheus.Counter
     blocksMarkedForDeletion prometheus.Counter
+    blocksMarkedForNoCompact prometheus.Counter
     hashFunc metadata.HashFunc
 }
 
@@ -365,6 +370,7 @@ func NewGroup(
     verticalCompactions prometheus.Counter,
     groupGarbageCollectedBlocks prometheus.Counter,
     blocksMarkedForDeletion prometheus.Counter,
+    blocksMarkedForNoCompact prometheus.Counter,
     hashFunc metadata.HashFunc,
 ) (*Group, error) {
     if logger == nil {
@@ -385,6 +391,7 @@
         verticalCompactions: verticalCompactions,
         groupGarbageCollectedBlocks: groupGarbageCollectedBlocks,
         blocksMarkedForDeletion: blocksMarkedForDeletion,
+        blocksMarkedForNoCompact: blocksMarkedForNoCompact,
         hashFunc: hashFunc,
     }
     return g, nil
@@ -541,6 +548,27 @@ func IsIssue347Error(err error) bool {
     return ok
 }
 
+// OutOfOrderChunksError is a type wrapper for an out-of-order chunk error found while validating a block index.
+type OutOfOrderChunksError struct {
+    err error
+
+    id ulid.ULID
+}
+
+func (e OutOfOrderChunksError) Error() string {
+    return e.err.Error()
+}
+
+func outOfOrderChunkError(err error, brokenBlock ulid.ULID) OutOfOrderChunksError {
+    return OutOfOrderChunksError{err: err, id: brokenBlock}
+}
+
+// IsOutOfOrderChunkError returns true if the base error is an OutOfOrderChunksError.
+func IsOutOfOrderChunkError(err error) bool {
+    _, ok := errors.Cause(err).(OutOfOrderChunksError)
+    return ok
+}
+
 // HaltError is a type wrapper for errors that should halt any further progress on compactions.
 type HaltError struct {
     err error
@@ -749,6 +777,10 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
         return false, ulid.ULID{}, halt(errors.Wrapf(err, "block with not healthy index found %s; Compaction level %v; Labels: %v", bdir, meta.Compaction.Level, meta.Thanos.Labels))
     }
 
+    if err := stats.OutOfOrderChunksErr(); err != nil {
+        return false, ulid.ULID{}, outOfOrderChunkError(errors.Wrapf(err, "block with out-of-order chunks found, dropping it from compaction: %s", bdir), meta.ULID)
+    }
+
     if err := stats.Issue347OutsideChunksErr(); err != nil {
         return false, ulid.ULID{}, issue347Error(errors.Wrapf(err, "invalid, but reparable block %s", bdir), meta.ULID)
     }
@@ -939,6 +971,23 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) {
                     continue
                 }
             }
+            // If the block has out-of-order chunks, mark it for no compaction and continue.
+            if IsOutOfOrderChunkError(err) {
+                if err := block.MarkForNoCompact(
+                    ctx,
+                    c.logger,
+                    c.bkt,
+                    err.(OutOfOrderChunksError).id,
+                    metadata.OutOfOrderChunksNoCompactReason,
+                    "OutOfOrderChunk: marking block with out-of-order series/chunks as no-compact to unblock compaction", g.blocksMarkedForNoCompact); err == nil {
+                    mtx.Lock()
+                    finishedAllGroups = false
+                    mtx.Unlock()
+                    continue
+                }
+                // If marking fails, fall through and report the original error below.
+            }
             errChan <- errors.Wrapf(err, "group %s", g.Key())
             return
         }
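
One detail worth calling out: IsOutOfOrderChunkError classifies via errors.Cause, so the errors.Wrapf layers added along the compaction path do not hide the typed error, and (*BucketCompactor).Compact can still get the block ULID back out of it. A small test-style sketch of that property (hypothetical test, written as if it lived in package compact so the unexported constructor and field are reachable):

```go
package compact

import (
	"testing"

	"github.com/oklog/ulid"
	"github.com/pkg/errors"
)

// Sketch: wrapping with errors.Wrapf must not hide the typed error, because
// Compact() later unwraps it to find which block to mark no-compact.
func TestOutOfOrderChunksErrorSurvivesWrapping(t *testing.T) {
	var id ulid.ULID // zero ULID is enough for the sketch
	base := outOfOrderChunkError(errors.New("2/5 series have out-of-order chunks"), id)
	wrapped := errors.Wrapf(base, "group %s", "0@12345")

	if !IsOutOfOrderChunkError(wrapped) {
		t.Fatal("expected the wrapped error to still be classified as an out-of-order chunk error")
	}
	if got := errors.Cause(wrapped).(OutOfOrderChunksError).id; got != id {
		t.Fatalf("unexpected block id: %s", got)
	}
}
```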

pkg/compact/compact_e2e_test.go

Lines changed: 53 additions & 24 deletions
@@ -102,6 +102,7 @@ func TestSyncer_GarbageCollect_e2e(t *testing.T) {
 
     blocksMarkedForDeletion := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
     garbageCollectedBlocks := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
+    blockMarkedForNoCompact := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
     ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(nil, nil, 48*time.Hour, fetcherConcurrency)
     sy, err := NewMetaSyncer(nil, nil, bkt, metaFetcher, duplicateBlocksFilter, ignoreDeletionMarkFilter, blocksMarkedForDeletion, garbageCollectedBlocks, 1)
     testutil.Ok(t, err)
@@ -138,7 +139,7 @@ func TestSyncer_GarbageCollect_e2e(t *testing.T) {
     testutil.Ok(t, sy.GarbageCollect(ctx))
 
     // Only the level 3 block, the last source block in both resolutions should be left.
-    grouper := NewDefaultGrouper(nil, bkt, false, false, nil, blocksMarkedForDeletion, garbageCollectedBlocks, metadata.NoneFunc)
+    grouper := NewDefaultGrouper(nil, bkt, false, false, nil, blocksMarkedForDeletion, garbageCollectedBlocks, blockMarkedForNoCompact, metadata.NoneFunc)
     groups, err := grouper.Groups(sy.Metas())
     testutil.Ok(t, err)
 
@@ -195,23 +196,26 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
 
     ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(logger, objstore.WithNoopInstr(bkt), 48*time.Hour, fetcherConcurrency)
     duplicateBlocksFilter := block.NewDeduplicateFilter()
+    noCompactMarkerFilter := NewGatherNoCompactionMarkFilter(logger, objstore.WithNoopInstr(bkt), 2)
     metaFetcher, err := block.NewMetaFetcher(nil, 32, objstore.WithNoopInstr(bkt), "", nil, []block.MetadataFilter{
         ignoreDeletionMarkFilter,
         duplicateBlocksFilter,
+        noCompactMarkerFilter,
     }, nil)
     testutil.Ok(t, err)
 
     blocksMarkedForDeletion := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
+    blocksMarkedForNoCompact := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
     garbageCollectedBlocks := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
     sy, err := NewMetaSyncer(nil, nil, bkt, metaFetcher, duplicateBlocksFilter, ignoreDeletionMarkFilter, blocksMarkedForDeletion, garbageCollectedBlocks, 5)
     testutil.Ok(t, err)
 
     comp, err := tsdb.NewLeveledCompactor(ctx, reg, logger, []int64{1000, 3000}, nil, mergeFunc)
     testutil.Ok(t, err)
 
-    planner := NewTSDBBasedPlanner(logger, []int64{1000, 3000})
+    planner := NewPlanner(logger, []int64{1000, 3000}, noCompactMarkerFilter)
 
-    grouper := NewDefaultGrouper(logger, bkt, false, false, reg, blocksMarkedForDeletion, garbageCollectedBlocks, metadata.NoneFunc)
+    grouper := NewDefaultGrouper(logger, bkt, false, false, reg, blocksMarkedForDeletion, garbageCollectedBlocks, blocksMarkedForNoCompact, metadata.NoneFunc)
     bComp, err := NewBucketCompactor(logger, sy, grouper, planner, comp, dir, bkt, 2)
     testutil.Ok(t, err)
 
@@ -220,6 +224,7 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
     testutil.Equals(t, 0.0, promtest.ToFloat64(sy.metrics.garbageCollectedBlocks))
     testutil.Equals(t, 0.0, promtest.ToFloat64(sy.metrics.blocksMarkedForDeletion))
     testutil.Equals(t, 0.0, promtest.ToFloat64(sy.metrics.garbageCollectionFailures))
+    testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.blocksMarkedForNoCompact))
     testutil.Equals(t, 0, MetricCount(grouper.compactions))
     testutil.Equals(t, 0, MetricCount(grouper.compactionRunsStarted))
     testutil.Equals(t, 0, MetricCount(grouper.compactionRunsCompleted))
@@ -233,7 +238,7 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
     extLabels2 := labels.Labels{{Name: "e1", Value: "1"}}
     metas := createAndUpload(t, bkt, []blockgenSpec{
         {
-            numSamples: 100, mint: 0, maxt: 1000, extLset: extLabels, res: 124,
+            numSamples: 100, mint: 500, maxt: 1000, extLset: extLabels, res: 124,
             series: []labels.Labels{
                 {{Name: "a", Value: "1"}},
                 {{Name: "a", Value: "2"}, {Name: "b", Value: "2"}},
@@ -303,31 +308,42 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
                 {{Name: "a", Value: "7"}},
             },
         },
+    }, []blockgenSpec{
+        {
+            numSamples: 100, mint: 0, maxt: 499, extLset: extLabels, res: 124,
+            series: []labels.Labels{
+                {{Name: "a", Value: "1"}},
+                {{Name: "a", Value: "2"}, {Name: "b", Value: "2"}},
+                {{Name: "a", Value: "3"}},
+                {{Name: "a", Value: "4"}},
+            },
+        },
     })
 
     testutil.Ok(t, bComp.Compact(ctx))
     testutil.Equals(t, 5.0, promtest.ToFloat64(sy.metrics.garbageCollectedBlocks))
     testutil.Equals(t, 5.0, promtest.ToFloat64(sy.metrics.blocksMarkedForDeletion))
+    testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.blocksMarkedForNoCompact))
     testutil.Equals(t, 0.0, promtest.ToFloat64(sy.metrics.garbageCollectionFailures))
     testutil.Equals(t, 4, MetricCount(grouper.compactions))
     testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(DefaultGroupKey(metas[0].Thanos))))
     testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(DefaultGroupKey(metas[7].Thanos))))
     testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(DefaultGroupKey(metas[4].Thanos))))
     testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(DefaultGroupKey(metas[5].Thanos))))
     testutil.Equals(t, 4, MetricCount(grouper.compactionRunsStarted))
-    testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[0].Thanos))))
-    testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[7].Thanos))))
+    testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[0].Thanos))))
+    testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[7].Thanos))))
     // TODO(bwplotka): Looks like we do some unnecessary loops. Not a major problem but investigate.
-    testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[4].Thanos))))
-    testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[5].Thanos))))
+    testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[4].Thanos))))
+    testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[5].Thanos))))
     testutil.Equals(t, 4, MetricCount(grouper.compactionRunsCompleted))
     testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[0].Thanos))))
-    testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[7].Thanos))))
+    testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[7].Thanos))))
     // TODO(bwplotka): Looks like we do some unnecessary loops. Not a major problem but investigate.
-    testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[4].Thanos))))
-    testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[5].Thanos))))
+    testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[4].Thanos))))
+    testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[5].Thanos))))
     testutil.Equals(t, 4, MetricCount(grouper.compactionFailures))
-    testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(DefaultGroupKey(metas[0].Thanos))))
+    testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(DefaultGroupKey(metas[0].Thanos))))
     testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(DefaultGroupKey(metas[7].Thanos))))
     testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(DefaultGroupKey(metas[4].Thanos))))
     testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(DefaultGroupKey(metas[5].Thanos))))
@@ -342,6 +358,7 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
         metas[4].ULID: false,
         metas[5].ULID: false,
         metas[8].ULID: false,
+        metas[9].ULID: false,
     }
     others := map[string]metadata.Meta{}
     testutil.Ok(t, bkt.Iter(ctx, "", func(n string) error {
@@ -374,7 +391,7 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
     meta, ok := others[defaultGroupKey(124, extLabels)]
     testutil.Assert(t, ok, "meta not found")
 
-    testutil.Equals(t, int64(0), meta.MinTime)
+    testutil.Equals(t, int64(500), meta.MinTime)
     testutil.Equals(t, int64(3000), meta.MaxTime)
     testutil.Equals(t, uint64(6), meta.Stats.NumSeries)
     testutil.Equals(t, uint64(2*4*100), meta.Stats.NumSamples) // Only 2 times 4*100 because one block was empty.
@@ -413,7 +430,7 @@ type blockgenSpec struct {
     res int64
 }
 
-func createAndUpload(t testing.TB, bkt objstore.Bucket, blocks []blockgenSpec) (metas []*metadata.Meta) {
+func createAndUpload(t testing.TB, bkt objstore.Bucket, blocks []blockgenSpec, blocksWithOutOfOrderChunks []blockgenSpec) (metas []*metadata.Meta) {
     prepareDir, err := ioutil.TempDir("", "test-compact-prepare")
     testutil.Ok(t, err)
     defer func() { testutil.Ok(t, os.RemoveAll(prepareDir)) }()
@@ -422,23 +439,35 @@ func createAndUpload(t testing.TB, bkt objstore.Bucket, blocks []blockgenSpec) (
     defer cancel()
 
     for _, b := range blocks {
-        var id ulid.ULID
-        var err error
-        if b.numSamples == 0 {
-            id, err = e2eutil.CreateEmptyBlock(prepareDir, b.mint, b.maxt, b.extLset, b.res)
-        } else {
-            id, err = e2eutil.CreateBlock(ctx, prepareDir, b.series, b.numSamples, b.mint, b.maxt, b.extLset, b.res, metadata.NoneFunc)
-        }
-        testutil.Ok(t, err)
+        id, meta := createBlock(t, ctx, prepareDir, b)
+        metas = append(metas, meta)
+        testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(prepareDir, id.String()), metadata.NoneFunc))
+    }
+    for _, b := range blocksWithOutOfOrderChunks {
+        id, meta := createBlock(t, ctx, prepareDir, b)
 
-        meta, err := metadata.ReadFromDir(filepath.Join(prepareDir, id.String()))
+        err := testutil.PutOutOfOrderIndex(filepath.Join(prepareDir, id.String()), b.mint, b.maxt)
         testutil.Ok(t, err)
-        metas = append(metas, meta)
 
+        metas = append(metas, meta)
         testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(prepareDir, id.String()), metadata.NoneFunc))
     }
+
     return metas
 }
+func createBlock(t testing.TB, ctx context.Context, prepareDir string, b blockgenSpec) (id ulid.ULID, meta *metadata.Meta) {
+    var err error
+    if b.numSamples == 0 {
+        id, err = e2eutil.CreateEmptyBlock(prepareDir, b.mint, b.maxt, b.extLset, b.res)
+    } else {
+        id, err = e2eutil.CreateBlock(ctx, prepareDir, b.series, b.numSamples, b.mint, b.maxt, b.extLset, b.res, metadata.NoneFunc)
+    }
+    testutil.Ok(t, err)
+
+    meta, err = metadata.ReadFromDir(filepath.Join(prepareDir, id.String()))
+    testutil.Ok(t, err)
+    return
+}
 
 // Regression test for #2459 issue.
 func TestGarbageCollectDoesntCreateEmptyBlocksWithDeletionMarksOnly(t *testing.T) {
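
A closing note on the test wiring above: writing the no-compact marker only helps if the planner is built to consult it. The e2e test therefore switches from NewTSDBBasedPlanner to NewPlanner with a GatherNoCompactionMarkFilter and registers the same filter with the MetaFetcher. A condensed sketch of that wiring (a hypothetical helper with its dependencies passed in; the level steps and the filter's final argument simply mirror the test and are not a recommendation):

```go
// Sketch only; not part of this commit.
package compact

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/thanos/pkg/block"
	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/objstore"
)

// wireNoCompactPlanning shows the pieces that make no-compact marks effective:
// the filter discovers no-compact markers during metadata sync, the planner skips
// marked blocks, and the grouper carries the counter bumped when a block with
// out-of-order chunks gets marked.
func wireNoCompactPlanning(
	logger log.Logger,
	bkt objstore.Bucket,
	reg prometheus.Registerer,
	blocksMarkedForDeletion, garbageCollectedBlocks, blocksMarkedForNoCompact prometheus.Counter,
) ([]block.MetadataFilter, Planner, *DefaultGrouper) {
	noCompactFilter := NewGatherNoCompactionMarkFilter(logger, objstore.WithNoopInstr(bkt), 2)

	planner := NewPlanner(logger, []int64{1000, 3000}, noCompactFilter)

	grouper := NewDefaultGrouper(logger, bkt, false, false, reg,
		blocksMarkedForDeletion, garbageCollectedBlocks, blocksMarkedForNoCompact, metadata.NoneFunc)

	// The filter must also be included in the MetaFetcher's filter list so the
	// markers are actually fetched; the caller appends it alongside the
	// deduplicate and ignore-deletion-mark filters, as the e2e test does.
	return []block.MetadataFilter{noCompactFilter}, planner, grouper
}
```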
