Merged
10 changes: 5 additions & 5 deletions pkg/ingester/ingester.go
@@ -1347,7 +1347,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte
return nil, wrapWithUser(err, userID)
}

- if i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms {
+ if i.limits.EnableNativeHistogramPerUser(userID) {
Contributor: I think we need to throw an exception here, similar to Prometheus? Wdyt?

https://github.com/prometheus/prometheus/blob/main/tsdb/head_append.go#L647

Try ingesting some native histogram samples into the current version of Cortex with the NH feature disabled and see what the behaviour is.

Contributor Author (@PaurushGarg, Apr 24, 2025): I think the current Cortex behaviour is to count it as a discarded sample rather than throw an error here, but I'm still checking by running Cortex.
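To make the behaviour discussed in this thread concrete, here is a minimal, self-contained Go sketch of the count-rather-than-error pattern this PR ends up with. It is a toy model, not Cortex code: the limits type, the pushHistograms function, and the user IDs are invented for illustration; only the EnableNativeHistogramPerUser method name mirrors the diff.

```go
package main

import "fmt"

// Toy stand-in for the per-tenant override lookup added in this PR; only the
// method name mirrors the real validation.Overrides API.
type limits struct {
	enabled map[string]bool
}

func (l limits) EnableNativeHistogramPerUser(userID string) bool {
	return l.enabled[userID]
}

// pushHistograms models the behaviour described above: when the per-user gate is
// off, native histogram samples are dropped and counted as discarded instead of
// causing the push to fail.
func pushHistograms(l limits, userID string, histograms []int) (ingested, discarded int) {
	for range histograms {
		if !l.EnableNativeHistogramPerUser(userID) {
			discarded++
			continue
		}
		ingested++ // in Cortex this would append the sample to the TSDB head
	}
	return ingested, discarded
}

func main() {
	l := limits{enabled: map[string]bool{"user-1": true}}

	in, disc := pushHistograms(l, "user-1", []int{1, 2, 3})
	fmt.Println("user-1 ingested:", in, "discarded:", disc) // user-1 ingested: 3 discarded: 0

	in, disc = pushHistograms(l, "user-2", []int{1, 2})
	fmt.Println("user-2 ingested:", in, "discarded:", disc) // user-2 ingested: 0 discarded: 2
}
```

In the diff itself the same decision shows up twice: the per-user gate at the top of Push, and the DiscardedSamples increment further down in the same function.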

for _, hp := range ts.Histograms {
var (
err error
@@ -1494,7 +1494,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte
i.validateMetrics.DiscardedSamples.WithLabelValues(perLabelsetSeriesLimit, userID).Add(float64(perLabelSetSeriesLimitCount))
}

- if !i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms && discardedNativeHistogramCount > 0 {
+ if !i.limits.EnableNativeHistogramPerUser(userID) && discardedNativeHistogramCount > 0 {
i.validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramSample, userID).Add(float64(discardedNativeHistogramCount))
}

@@ -2451,9 +2451,9 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
EnableMemorySnapshotOnShutdown: i.cfg.BlocksStorageConfig.TSDB.MemorySnapshotOnShutdown,
OutOfOrderTimeWindow: time.Duration(oooTimeWindow).Milliseconds(),
OutOfOrderCapMax: i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax,
- EnableOOONativeHistograms: i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms, // Automatically enabled when EnableNativeHistograms is true.
- EnableOverlappingCompaction: false, // Always let compactors handle overlapped blocks, e.g. OOO blocks.
- EnableNativeHistograms: i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms,
+ EnableOOONativeHistograms: true,
+ EnableOverlappingCompaction: false, // Always let compactors handle overlapped blocks, e.g. OOO blocks.
+ EnableNativeHistograms: true, // Always enable Native Histograms
Contributor: Maybe explain here that the gatekeeping is done through a per-tenant config at ingestion?

Contributor Author: Thanks. Added.

BlockChunkQuerierFunc: i.blockChunkQuerierFunc(userID),
}, nil)
if err != nil {
41 changes: 25 additions & 16 deletions pkg/ingester/ingester_test.go
@@ -124,6 +124,7 @@ func seriesSetFromResponseStream(s *mockQueryStreamServer) (storage.SeriesSet, e

func TestMatcherCache(t *testing.T) {
limits := defaultLimitsTestConfig()
+ limits.EnableNativeHistogramPerUser = true
userID := "1"
tenantLimits := newMockTenantLimits(map[string]*validation.Limits{userID: &limits})
registry := prometheus.NewRegistry()
@@ -135,7 +136,7 @@ func TestMatcherCache(t *testing.T) {
require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
cfg := defaultIngesterTestConfig(t)
cfg.MatchersCacheMaxItems = 50
- ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, registry, true)
+ ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, registry)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))

@@ -204,7 +205,7 @@ func TestIngesterDeletionRace(t *testing.T) {
require.NoError(t, os.Mkdir(chunksDir, os.ModePerm))
require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))

- ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, registry, false)
+ ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, registry)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
@@ -254,6 +255,7 @@ func TestIngesterDeletionRace(t *testing.T) {

func TestIngesterPerLabelsetLimitExceeded(t *testing.T) {
limits := defaultLimitsTestConfig()
+ limits.EnableNativeHistogramPerUser = true
userID := "1"
registry := prometheus.NewRegistry()

@@ -287,7 +289,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) {
require.NoError(t, os.Mkdir(chunksDir, os.ModePerm))
require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))

- ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry, true)
+ ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
// Wait until it's ACTIVE
@@ -630,7 +632,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) {
// Should persist between restarts
services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
registry = prometheus.NewRegistry()
- ing, err = prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry, true)
+ ing, err = prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
ing.updateActiveSeries(ctx)
@@ -661,6 +663,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) {
func TestPushRace(t *testing.T) {
cfg := defaultIngesterTestConfig(t)
l := defaultLimitsTestConfig()
+ l.EnableNativeHistogramPerUser = true
cfg.LabelsStringInterningEnabled = true
cfg.LifecyclerConfig.JoinAfter = 0

@@ -686,7 +689,7 @@ func TestPushRace(t *testing.T) {
blocksDir := filepath.Join(dir, "blocks")
require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))

- ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, l, nil, blocksDir, prometheus.NewRegistry(), true)
+ ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, l, nil, blocksDir, prometheus.NewRegistry())
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
@@ -747,6 +750,7 @@ func TestPushRace(t *testing.T) {

func TestIngesterUserLimitExceeded(t *testing.T) {
limits := defaultLimitsTestConfig()
+ limits.EnableNativeHistogramPerUser = true
limits.MaxLocalSeriesPerUser = 1
limits.MaxLocalMetricsWithMetadataPerUser = 1

@@ -778,7 +782,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) {
require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))

blocksIngesterGenerator := func(reg prometheus.Registerer) *Ingester {
- ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, reg, true)
+ ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, reg)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
// Wait until it's ACTIVE
@@ -878,6 +882,7 @@ func benchmarkData(nSeries int) (allLabels []labels.Labels, allSamples []cortexp

func TestIngesterMetricLimitExceeded(t *testing.T) {
limits := defaultLimitsTestConfig()
+ limits.EnableNativeHistogramPerUser = true
limits.MaxLocalSeriesPerMetric = 1
limits.MaxLocalMetadataPerMetric = 1

@@ -909,7 +914,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) {
require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))

blocksIngesterGenerator := func(reg prometheus.Registerer) *Ingester {
- ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, reg, true)
+ ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, reg)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
// Wait until it's ACTIVE
@@ -1933,6 +1938,7 @@ func TestIngester_Push(t *testing.T) {
cfg.ActiveSeriesMetricsEnabled = !testData.disableActiveSeries

limits := defaultLimitsTestConfig()
+ limits.EnableNativeHistogramPerUser = !testData.disableNativeHistogram
limits.MaxExemplars = testData.maxExemplars
limits.OutOfOrderTimeWindow = model.Duration(testData.oooTimeWindow)
limits.LimitsPerLabelSet = []validation.LimitsPerLabelSet{
@@ -1945,7 +1951,7 @@
Hash: 1,
},
}
- i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry, !testData.disableNativeHistogram)
+ i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -2174,7 +2180,8 @@ func TestIngester_PushNativeHistogramErrors(t *testing.T) {
cfg.LifecyclerConfig.JoinAfter = 0

limits := defaultLimitsTestConfig()
- i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry, true)
+ limits.EnableNativeHistogramPerUser = true
+ i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -2662,6 +2669,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
cfg.LifecyclerConfig.JoinAfter = 0

limits := defaultLimitsTestConfig()
+ limits.EnableNativeHistogramPerUser = true
if !testData.prepareConfig(&limits, instanceLimits) {
b.SkipNow()
}
@@ -2670,7 +2678,7 @@
return instanceLimits
}

- ingester, err := prepareIngesterWithBlocksStorageAndLimits(b, cfg, limits, nil, "", registry, true)
+ ingester, err := prepareIngesterWithBlocksStorageAndLimits(b, cfg, limits, nil, "", registry)
require.NoError(b, err)
require.NoError(b, services.StartAndAwaitRunning(context.Background(), ingester))
defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck
@@ -3947,10 +3955,10 @@ func mockHistogramWriteRequest(t *testing.T, lbls labels.Labels, value int64, ti
}

func prepareIngesterWithBlocksStorage(t testing.TB, ingesterCfg Config, registerer prometheus.Registerer) (*Ingester, error) {
- return prepareIngesterWithBlocksStorageAndLimits(t, ingesterCfg, defaultLimitsTestConfig(), nil, "", registerer, true)
+ return prepareIngesterWithBlocksStorageAndLimits(t, ingesterCfg, defaultLimitsTestConfig(), nil, "", registerer)
}

- func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, limits validation.Limits, tenantLimits validation.TenantLimits, dataDir string, registerer prometheus.Registerer, nativeHistograms bool) (*Ingester, error) {
+ func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, limits validation.Limits, tenantLimits validation.TenantLimits, dataDir string, registerer prometheus.Registerer) (*Ingester, error) {
// Create a data dir if none has been provided.
if dataDir == "" {
dataDir = t.TempDir()
@@ -3966,7 +3974,6 @@ func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config,
ingesterCfg.BlocksStorageConfig.TSDB.Dir = dataDir
ingesterCfg.BlocksStorageConfig.Bucket.Backend = "filesystem"
ingesterCfg.BlocksStorageConfig.Bucket.Filesystem.Directory = bucketDir
- ingesterCfg.BlocksStorageConfig.TSDB.EnableNativeHistograms = nativeHistograms

ingester, err := New(ingesterCfg, overrides, registerer, log.NewNopLogger(), nil)
if err != nil {
@@ -6432,15 +6439,16 @@ func TestIngester_MaxExemplarsFallBack(t *testing.T) {
dir := t.TempDir()
blocksDir := filepath.Join(dir, "blocks")
limits := defaultLimitsTestConfig()
- i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry(), true)
+ limits.EnableNativeHistogramPerUser = true
+ i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry())
require.NoError(t, err)

maxExemplars := i.getMaxExemplars("someTenant")
require.Equal(t, maxExemplars, int64(2))

// set max exemplars value in limits, and re-initialize the ingester
limits.MaxExemplars = 5
- i, err = prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry(), true)
+ i, err = prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry())
require.NoError(t, err)

// validate this value is picked up now
@@ -6815,6 +6823,7 @@ func TestIngester_UpdateLabelSetMetrics(t *testing.T) {
cfg.BlocksStorageConfig.TSDB.BlockRanges = []time.Duration{2 * time.Hour}
reg := prometheus.NewRegistry()
limits := defaultLimitsTestConfig()
+ limits.EnableNativeHistogramPerUser = true
userID := "1"
ctx := user.InjectOrgID(context.Background(), userID)

@@ -6839,7 +6848,7 @@ func TestIngester_UpdateLabelSetMetrics(t *testing.T) {
require.NoError(t, os.Mkdir(chunksDir, os.ModePerm))
require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))

- i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, reg, false)
+ i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, reg)
Contributor: Do we have a test case for when native histograms are disabled and a push should increment the discarded samples metric? If not, can you add one to TestIngester_Push?
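For illustration only, here is a self-contained sketch of the kind of assertion such a TestIngester_Push case would make: with the per-tenant gate off, pushing native histograms should bump the discarded-samples counter rather than fail. The counter and its label values below are local stand-ins; the real test would drive Ingester.Push and inspect the ingester's validateMetrics using the nativeHistogramSample reason from the diff.

```go
package main

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// Stand-in for the ingester's discarded-samples counter; the real test would go
// through Ingester.Push and the validateMetrics registry instead.
func TestDiscardedNativeHistogramsCounted(t *testing.T) {
	discarded := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "cortex_discarded_samples_total",
		Help: "Samples discarded at ingestion.",
	}, []string{"reason", "user"})

	enabled := false            // per-tenant native histogram gate off for this user
	histogramsInRequest := 3    // pretend the push carried three native histogram samples

	if !enabled && histogramsInRequest > 0 {
		discarded.WithLabelValues("native-histogram-sample", "user-1").Add(float64(histogramsInRequest))
	}

	got := testutil.ToFloat64(discarded.WithLabelValues("native-histogram-sample", "user-1"))
	if got != float64(histogramsInRequest) {
		t.Fatalf("expected %d discarded samples, got %v", histogramsInRequest, got)
	}
}
```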

require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
4 changes: 0 additions & 4 deletions pkg/storage/tsdb/config.go
@@ -172,9 +172,6 @@ type TSDBConfig struct {
// OutOfOrderCapMax is maximum capacity for OOO chunks (in samples).
OutOfOrderCapMax int64 `yaml:"out_of_order_cap_max"`

- // Enable native histogram ingestion.
- EnableNativeHistograms bool `yaml:"enable_native_histograms"`

// Posting Cache Configuration for TSDB
PostingsCache TSDBPostingsCacheConfig `yaml:"expanded_postings_cache" doc:"description=[EXPERIMENTAL] If enabled, ingesters will cache expanded postings when querying blocks. Caching can be configured separately for the head and compacted blocks."`
}
@@ -204,7 +201,6 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in limits is set to zero, cortex will fallback on this value. This setting enables support for exemplars in TSDB and sets the maximum number that will be stored. 0 or less means disabled.")
f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.")
f.Int64Var(&cfg.OutOfOrderCapMax, "blocks-storage.tsdb.out-of-order-cap-max", tsdb.DefaultOutOfOrderCapMax, "[EXPERIMENTAL] Configures the maximum number of samples per chunk that can be out-of-order.")
- f.BoolVar(&cfg.EnableNativeHistograms, "blocks-storage.tsdb.enable-native-histograms", false, "[EXPERIMENTAL] True to enable native histogram.")
Contributor: This will be a breaking change. But since this feature is experimental, can we remove it and simplify the configuration?

@yeya24 @alanprot what do you guys think?

Contributor: Is it still a breaking change if we keep the configuration name the same but move it to the per-tenant runtime config? I guess we don't have to rename it just to add a _per_user suffix. That way existing users won't be impacted, as it can still be set globally.

Contributor: Did you mean keep the same CLI flag?

Contributor: Yeah. It is still kind of breaking, as it moves from the tsdb section to the limits section of the config file. But I guess it is fine for an experimental feature, as you said.

Contributor Author: Thanks. Updated to keep the same config name.
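To make the per-tenant runtime override idea discussed above concrete, here is a small self-contained sketch of the lookup-with-fallback shape that the new Overrides getter relies on. The types and field set are trimmed down for illustration and are not the real validation package; only the GetOverridesForUser-style fallback and the EnableNativeHistogramPerUser method mirror the diff.

```go
package main

import "fmt"

// Limits mirrors just the one field this PR adds; everything else is omitted.
type Limits struct {
	EnableNativeHistogramPerUser bool
}

// Overrides resolves a tenant's limits, falling back to the global defaults when
// no per-tenant entry exists, which is the same shape as GetOverridesForUser in the diff.
type Overrides struct {
	defaults     Limits
	tenantLimits map[string]*Limits
}

func (o *Overrides) getOverridesForUser(userID string) *Limits {
	if l, ok := o.tenantLimits[userID]; ok && l != nil {
		return l
	}
	return &o.defaults
}

func (o *Overrides) EnableNativeHistogramPerUser(userID string) bool {
	return o.getOverridesForUser(userID).EnableNativeHistogramPerUser
}

func main() {
	o := &Overrides{
		defaults:     Limits{EnableNativeHistogramPerUser: false}, // global default, e.g. from the CLI flag
		tenantLimits: map[string]*Limits{"team-a": {EnableNativeHistogramPerUser: true}},
	}
	fmt.Println(o.EnableNativeHistogramPerUser("team-a")) // true  (per-tenant runtime override)
	fmt.Println(o.EnableNativeHistogramPerUser("team-b")) // false (falls back to the global default)
}
```

Keeping the setting in the limits block means the same value can still be set once globally, preserving today's single-flag behaviour, while also allowing per-tenant enablement through the runtime configuration.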


flagext.DeprecatedFlag(f, "blocks-storage.tsdb.wal-compression-enabled", "Deprecated (use blocks-storage.tsdb.wal-compression-type instead): True to enable TSDB WAL compression.", util_log.Logger)

17 changes: 12 additions & 5 deletions pkg/util/validation/limits.go
@@ -144,11 +144,12 @@ type Limits struct {

// Ingester enforced limits.
// Series
- MaxLocalSeriesPerUser int `yaml:"max_series_per_user" json:"max_series_per_user"`
- MaxLocalSeriesPerMetric int `yaml:"max_series_per_metric" json:"max_series_per_metric"`
- MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user" json:"max_global_series_per_user"`
- MaxGlobalSeriesPerMetric int `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"`
- LimitsPerLabelSet []LimitsPerLabelSet `yaml:"limits_per_label_set" json:"limits_per_label_set" doc:"nocli|description=[Experimental] Enable limits per LabelSet. Supported limits per labelSet: [max_series]"`
+ MaxLocalSeriesPerUser int `yaml:"max_series_per_user" json:"max_series_per_user"`
+ MaxLocalSeriesPerMetric int `yaml:"max_series_per_metric" json:"max_series_per_metric"`
+ MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user" json:"max_global_series_per_user"`
+ MaxGlobalSeriesPerMetric int `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"`
+ LimitsPerLabelSet []LimitsPerLabelSet `yaml:"limits_per_label_set" json:"limits_per_label_set" doc:"nocli|description=[Experimental] Enable limits per LabelSet. Supported limits per labelSet: [max_series]"`
+ EnableNativeHistogramPerUser bool `yaml:"enable_native_histogram_per_user" json:"enable_native_histogram_per_user"`

// Metadata
MaxLocalMetricsWithMetadataPerUser int `yaml:"max_metadata_per_user" json:"max_metadata_per_user"`
@@ -257,6 +258,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.")
f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.")
f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.")
+ f.BoolVar(&l.EnableNativeHistogramPerUser, "ingester.enable_native_histogram_per_user", false, "Flag to enable NativeHistograms per user.")
Contributor: Can you add the Experimental tag?

Contributor Author (@PaurushGarg, Apr 24, 2025): Thanks. Added the experimental tag.
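The updated flag description is not shown in this hunk, so the following is only a hedged sketch of what tagging the flag as experimental looks like, following the "[Experimental]" convention used by other limits registered in this file; the exact flag name and wording in the merged PR may differ.

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	// Stand-alone illustration of the registration pattern from limits.go above,
	// with an "[Experimental]" prefix in the help text; not the merged wording.
	var enableNativeHistogramPerUser bool
	fs := flag.NewFlagSet("limits", flag.ContinueOnError)
	fs.BoolVar(&enableNativeHistogramPerUser, "ingester.enable_native_histogram_per_user", false,
		"[Experimental] Flag to enable native histogram ingestion per user.")

	_ = fs.Parse([]string{"-ingester.enable_native_histogram_per_user=true"})
	fmt.Println(enableNativeHistogramPerUser) // true
}
```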

f.IntVar(&l.MaxExemplars, "ingester.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. less than zero means disabled. If the value is set to zero, cortex will fallback to blocks-storage.tsdb.max-exemplars value.")
f.Var(&l.OutOfOrderTimeWindow, "ingester.out-of-order-time-window", "[Experimental] Configures the allowed time window for ingestion of out-of-order samples. Disabled (0s) by default.")

@@ -659,6 +661,11 @@ func (o *Overrides) MaxGlobalSeriesPerUser(userID string) int {
return o.GetOverridesForUser(userID).MaxGlobalSeriesPerUser
}

+ // EnableNativeHistogramPerUser returns whether the Ingester should accept NativeHistograms samples from this user.
+ func (o *Overrides) EnableNativeHistogramPerUser(userID string) bool {
+ return o.GetOverridesForUser(userID).EnableNativeHistogramPerUser
+ }

// OutOfOrderTimeWindow returns the allowed time window for ingestion of out-of-order samples.
func (o *Overrides) OutOfOrderTimeWindow(userID string) model.Duration {
return o.GetOverridesForUser(userID).OutOfOrderTimeWindow