# Replace EnableNativeHistograms from TSDB config with PerTenant Limit #6718
Changes from 1 commit
In `Ingester.Push`, the global TSDB config gate becomes a per-tenant limit lookup:

```diff
@@ -1347,7 +1347,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte
 		return nil, wrapWithUser(err, userID)
 	}
 
-	if i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms {
+	if i.limits.EnableNativeHistogramPerUser(userID) {
 		for _, hp := range ts.Histograms {
 			var (
 				err error
```
The discarded-samples accounting keys off the same per-tenant limit:

```diff
@@ -1494,7 +1494,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte
 		i.validateMetrics.DiscardedSamples.WithLabelValues(perLabelsetSeriesLimit, userID).Add(float64(perLabelSetSeriesLimitCount))
 	}
 
-	if !i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms && discardedNativeHistogramCount > 0 {
+	if !i.limits.EnableNativeHistogramPerUser(userID) && discardedNativeHistogramCount > 0 {
 		i.validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramSample, userID).Add(float64(discardedNativeHistogramCount))
 	}
```
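Taken together, the two hunks above make native histogram ingestion a silent, per-tenant drop rather than a hard failure: histograms from tenants without the limit enabled are skipped and counted as discarded. A minimal sketch of that accounting pattern — the metric name and `reason` label value here are illustrative stand-ins, not Cortex's exact constants:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// discardedSamples mirrors the shape of the ingester's DiscardedSamples
// counter: one counter per (reason, user) pair.
var discardedSamples = prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "example_discarded_samples_total",
	Help: "Samples discarded during ingestion, by reason and user.",
}, []string{"reason", "user"})

func main() {
	userID := "tenant-a"
	nhEnabled := false // stand-in for i.limits.EnableNativeHistogramPerUser(userID)
	histogramsInRequest := 3

	discardedNativeHistogramCount := 0
	if !nhEnabled {
		// Histograms are skipped, not rejected: the push still succeeds.
		discardedNativeHistogramCount = histogramsInRequest
	}
	if !nhEnabled && discardedNativeHistogramCount > 0 {
		discardedSamples.WithLabelValues("native-histogram-sample", userID).
			Add(float64(discardedNativeHistogramCount))
	}
	fmt.Println(testutil.ToFloat64(
		discardedSamples.WithLabelValues("native-histogram-sample", userID))) // 3
}
```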
In `createTSDB`, native histogram support is now unconditionally enabled on the per-tenant TSDB head, since acceptance is decided earlier in `Push`:

```diff
@@ -2451,9 +2451,9 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
 		EnableMemorySnapshotOnShutdown: i.cfg.BlocksStorageConfig.TSDB.MemorySnapshotOnShutdown,
 		OutOfOrderTimeWindow:           time.Duration(oooTimeWindow).Milliseconds(),
 		OutOfOrderCapMax:               i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax,
-		EnableOOONativeHistograms:      i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms, // Automatically enabled when EnableNativeHistograms is true.
-		EnableOverlappingCompaction:    false, // Always let compactors handle overlapped blocks, e.g. OOO blocks.
-		EnableNativeHistograms:         i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms,
+		EnableOOONativeHistograms:      true,
+		EnableOverlappingCompaction:    false, // Always let compactors handle overlapped blocks, e.g. OOO blocks.
+		EnableNativeHistograms:         true, // Always enable Native Histograms
 		BlockChunkQuerierFunc:          i.blockChunkQuerierFunc(userID),
 	}, nil)
 	if err != nil {
```
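For reference, a sketch of the corresponding upstream TSDB options, assuming a Prometheus version that still exposes both fields (as the copy vendored here evidently does — field availability varies across Prometheus releases):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	// The head always supports native histograms; whether a tenant's
	// histograms ever reach it is decided by the per-tenant limit in Push.
	opts.EnableNativeHistograms = true
	opts.EnableOOONativeHistograms = true
	fmt.Printf("native histograms: %v, OOO native histograms: %v\n",
		opts.EnableNativeHistograms, opts.EnableOOONativeHistograms)
}
```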
The ingester tests drop the trailing `nativeHistograms bool` parameter from `prepareIngesterWithBlocksStorageAndLimits`; tests that need native histograms now set `limits.EnableNativeHistogramPerUser` instead:

```diff
@@ -124,6 +124,7 @@ func seriesSetFromResponseStream(s *mockQueryStreamServer) (storage.SeriesSet, e
 
 func TestMatcherCache(t *testing.T) {
 	limits := defaultLimitsTestConfig()
+	limits.EnableNativeHistogramPerUser = true
 	userID := "1"
 	tenantLimits := newMockTenantLimits(map[string]*validation.Limits{userID: &limits})
 	registry := prometheus.NewRegistry()
@@ -135,7 +136,7 @@ func TestMatcherCache(t *testing.T) {
 	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
 	cfg := defaultIngesterTestConfig(t)
 	cfg.MatchersCacheMaxItems = 50
-	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, registry, true)
+	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, registry)
 	require.NoError(t, err)
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
@@ -204,7 +205,7 @@ func TestIngesterDeletionRace(t *testing.T) {
 	require.NoError(t, os.Mkdir(chunksDir, os.ModePerm))
 	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
 
-	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, registry, false)
+	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, registry)
 	require.NoError(t, err)
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
 	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
@@ -254,6 +255,7 @@ func TestIngesterDeletionRace(t *testing.T) {
 
 func TestIngesterPerLabelsetLimitExceeded(t *testing.T) {
 	limits := defaultLimitsTestConfig()
+	limits.EnableNativeHistogramPerUser = true
 	userID := "1"
 	registry := prometheus.NewRegistry()
@@ -287,7 +289,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) {
 	require.NoError(t, os.Mkdir(chunksDir, os.ModePerm))
 	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
 
-	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry, true)
+	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry)
 	require.NoError(t, err)
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
 	// Wait until it's ACTIVE
@@ -630,7 +632,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) {
 	// Should persist between restarts
 	services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
 	registry = prometheus.NewRegistry()
-	ing, err = prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry, true)
+	ing, err = prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry)
 	require.NoError(t, err)
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
 	ing.updateActiveSeries(ctx)
@@ -661,6 +663,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) {
 func TestPushRace(t *testing.T) {
 	cfg := defaultIngesterTestConfig(t)
 	l := defaultLimitsTestConfig()
+	l.EnableNativeHistogramPerUser = true
 	cfg.LabelsStringInterningEnabled = true
 	cfg.LifecyclerConfig.JoinAfter = 0
@@ -686,7 +689,7 @@ func TestPushRace(t *testing.T) {
 	blocksDir := filepath.Join(dir, "blocks")
 	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
 
-	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, l, nil, blocksDir, prometheus.NewRegistry(), true)
+	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, l, nil, blocksDir, prometheus.NewRegistry())
 	require.NoError(t, err)
 	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
@@ -747,6 +750,7 @@ func TestPushRace(t *testing.T) {
 
 func TestIngesterUserLimitExceeded(t *testing.T) {
 	limits := defaultLimitsTestConfig()
+	limits.EnableNativeHistogramPerUser = true
 	limits.MaxLocalSeriesPerUser = 1
 	limits.MaxLocalMetricsWithMetadataPerUser = 1
@@ -778,7 +782,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) {
 	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
 
 	blocksIngesterGenerator := func(reg prometheus.Registerer) *Ingester {
-		ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, reg, true)
+		ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, reg)
 		require.NoError(t, err)
 		require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
 		// Wait until it's ACTIVE
@@ -878,6 +882,7 @@ func benchmarkData(nSeries int) (allLabels []labels.Labels, allSamples []cortexp
 
 func TestIngesterMetricLimitExceeded(t *testing.T) {
 	limits := defaultLimitsTestConfig()
+	limits.EnableNativeHistogramPerUser = true
 	limits.MaxLocalSeriesPerMetric = 1
 	limits.MaxLocalMetadataPerMetric = 1
@@ -909,7 +914,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) {
 	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
 
 	blocksIngesterGenerator := func(reg prometheus.Registerer) *Ingester {
-		ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, reg, true)
+		ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, reg)
 		require.NoError(t, err)
 		require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
 		// Wait until it's ACTIVE
@@ -1933,6 +1938,7 @@ func TestIngester_Push(t *testing.T) {
 	cfg.ActiveSeriesMetricsEnabled = !testData.disableActiveSeries
 
 	limits := defaultLimitsTestConfig()
+	limits.EnableNativeHistogramPerUser = !testData.disableNativeHistogram
 	limits.MaxExemplars = testData.maxExemplars
 	limits.OutOfOrderTimeWindow = model.Duration(testData.oooTimeWindow)
 	limits.LimitsPerLabelSet = []validation.LimitsPerLabelSet{
@@ -1945,7 +1951,7 @@ func TestIngester_Push(t *testing.T) {
 			Hash: 1,
 		},
 	}
-	i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry, !testData.disableNativeHistogram)
+	i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry)
 	require.NoError(t, err)
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
 	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -2174,7 +2180,8 @@ func TestIngester_PushNativeHistogramErrors(t *testing.T) {
 	cfg.LifecyclerConfig.JoinAfter = 0
 
 	limits := defaultLimitsTestConfig()
-	i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry, true)
+	limits.EnableNativeHistogramPerUser = true
+	i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry)
 	require.NoError(t, err)
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
 	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -2662,6 +2669,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
 	cfg.LifecyclerConfig.JoinAfter = 0
 
 	limits := defaultLimitsTestConfig()
+	limits.EnableNativeHistogramPerUser = true
 	if !testData.prepareConfig(&limits, instanceLimits) {
 		b.SkipNow()
 	}
@@ -2670,7 +2678,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
 		return instanceLimits
 	}
 
-	ingester, err := prepareIngesterWithBlocksStorageAndLimits(b, cfg, limits, nil, "", registry, true)
+	ingester, err := prepareIngesterWithBlocksStorageAndLimits(b, cfg, limits, nil, "", registry)
 	require.NoError(b, err)
 	require.NoError(b, services.StartAndAwaitRunning(context.Background(), ingester))
 	defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck
@@ -3947,10 +3955,10 @@ func mockHistogramWriteRequest(t *testing.T, lbls labels.Labels, value int64, ti
 }
 
 func prepareIngesterWithBlocksStorage(t testing.TB, ingesterCfg Config, registerer prometheus.Registerer) (*Ingester, error) {
-	return prepareIngesterWithBlocksStorageAndLimits(t, ingesterCfg, defaultLimitsTestConfig(), nil, "", registerer, true)
+	return prepareIngesterWithBlocksStorageAndLimits(t, ingesterCfg, defaultLimitsTestConfig(), nil, "", registerer)
 }
 
-func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, limits validation.Limits, tenantLimits validation.TenantLimits, dataDir string, registerer prometheus.Registerer, nativeHistograms bool) (*Ingester, error) {
+func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, limits validation.Limits, tenantLimits validation.TenantLimits, dataDir string, registerer prometheus.Registerer) (*Ingester, error) {
 	// Create a data dir if none has been provided.
 	if dataDir == "" {
 		dataDir = t.TempDir()
@@ -3966,7 +3974,6 @@ func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config,
 	ingesterCfg.BlocksStorageConfig.TSDB.Dir = dataDir
 	ingesterCfg.BlocksStorageConfig.Bucket.Backend = "filesystem"
 	ingesterCfg.BlocksStorageConfig.Bucket.Filesystem.Directory = bucketDir
-	ingesterCfg.BlocksStorageConfig.TSDB.EnableNativeHistograms = nativeHistograms
 
 	ingester, err := New(ingesterCfg, overrides, registerer, log.NewNopLogger(), nil)
 	if err != nil {
@@ -6432,15 +6439,16 @@ func TestIngester_MaxExemplarsFallBack(t *testing.T) {
 	dir := t.TempDir()
 	blocksDir := filepath.Join(dir, "blocks")
 	limits := defaultLimitsTestConfig()
-	i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry(), true)
+	limits.EnableNativeHistogramPerUser = true
+	i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	maxExemplars := i.getMaxExemplars("someTenant")
 	require.Equal(t, maxExemplars, int64(2))
 
 	// set max exemplars value in limits, and re-initialize the ingester
 	limits.MaxExemplars = 5
-	i, err = prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry(), true)
+	i, err = prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	// validate this value is picked up now
@@ -6815,6 +6823,7 @@ func TestIngester_UpdateLabelSetMetrics(t *testing.T) {
 	cfg.BlocksStorageConfig.TSDB.BlockRanges = []time.Duration{2 * time.Hour}
 	reg := prometheus.NewRegistry()
 	limits := defaultLimitsTestConfig()
+	limits.EnableNativeHistogramPerUser = true
 	userID := "1"
 	ctx := user.InjectOrgID(context.Background(), userID)
@@ -6839,7 +6848,7 @@ func TestIngester_UpdateLabelSetMetrics(t *testing.T) {
 	require.NoError(t, os.Mkdir(chunksDir, os.ModePerm))
 	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
 
-	i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, reg, false)
+	i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, blocksDir, reg)
 	require.NoError(t, err)
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
 	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
```

A reviewer asked on the `TestIngester_UpdateLabelSetMetrics` hunk:

> Do we have a test case for when native histograms are disabled and push should increment the samples discarded metrics?
In the TSDB config, the field and its CLI flag are removed:

```diff
@@ -172,9 +172,6 @@ type TSDBConfig struct {
 	// OutOfOrderCapMax is maximum capacity for OOO chunks (in samples).
 	OutOfOrderCapMax int64 `yaml:"out_of_order_cap_max"`
 
-	// Enable native histogram ingestion.
-	EnableNativeHistograms bool `yaml:"enable_native_histograms"`
-
 	// Posting Cache Configuration for TSDB
 	PostingsCache TSDBPostingsCacheConfig `yaml:"expanded_postings_cache" doc:"description=[EXPERIMENTAL] If enabled, ingesters will cache expanded postings when querying blocks. Caching can be configured separately for the head and compacted blocks."`
 }
@@ -204,7 +201,6 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in limits is set to zero, cortex will fallback on this value. This setting enables support for exemplars in TSDB and sets the maximum number that will be stored. 0 or less means disabled.")
 	f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.")
 	f.Int64Var(&cfg.OutOfOrderCapMax, "blocks-storage.tsdb.out-of-order-cap-max", tsdb.DefaultOutOfOrderCapMax, "[EXPERIMENTAL] Configures the maximum number of samples per chunk that can be out-of-order.")
-	f.BoolVar(&cfg.EnableNativeHistograms, "blocks-storage.tsdb.enable-native-histograms", false, "[EXPERIMENTAL] True to enable native histogram.")
 
 	flagext.DeprecatedFlag(f, "blocks-storage.tsdb.wal-compression-enabled", "Deprecated (use blocks-storage.tsdb.wal-compression-type instead): True to enable TSDB WAL compression.", util_log.Logger)
```

Review discussion on the flag removal:

> Is it still a breaking change if we keep the configuration name the same but move it to per-tenant runtime config? I guess we don't have to rename it just to add […]. That way existing users won't be impacted, as it is allowed to be set globally.

> Did you mean keep the same CLI flag?

> Yeah. It is still kind of breaking, as it moves from […]

> Thanks. Updated to keep the same config name.
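The thread above settles on keeping the old configuration name so existing deployments don't break. One way to keep an obsolete CLI flag accepted but inert is the deprecated-flag pattern already used in this file for `blocks-storage.tsdb.wal-compression-enabled`. A self-contained sketch of that pattern with the standard `flag` package — whether this PR ultimately registers the old flag this way is not visible in this commit:

```go
package main

import (
	"flag"
	"fmt"
)

// deprecatedFlag is a no-op flag.Value that only warns when set — the same
// idea as Cortex's flagext.DeprecatedFlag helper.
type deprecatedFlag struct{ name string }

func (d deprecatedFlag) String() string { return "" }
func (d deprecatedFlag) Set(string) error {
	fmt.Printf("flag %q is deprecated and has no effect\n", d.name)
	return nil
}
func (d deprecatedFlag) IsBoolFlag() bool { return true }

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	name := "blocks-storage.tsdb.enable-native-histograms"
	fs.Var(deprecatedFlag{name: name}, name,
		"Deprecated: native histogram ingestion is now a per-tenant limit.")
	_ = fs.Parse([]string{"-blocks-storage.tsdb.enable-native-histograms=true"})
}
```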
In the per-tenant limits, the new field is added alongside the other ingester-enforced series limits:

```diff
@@ -144,11 +144,12 @@ type Limits struct {
 
 	// Ingester enforced limits.
 	// Series
-	MaxLocalSeriesPerUser    int                 `yaml:"max_series_per_user" json:"max_series_per_user"`
-	MaxLocalSeriesPerMetric  int                 `yaml:"max_series_per_metric" json:"max_series_per_metric"`
-	MaxGlobalSeriesPerUser   int                 `yaml:"max_global_series_per_user" json:"max_global_series_per_user"`
-	MaxGlobalSeriesPerMetric int                 `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"`
-	LimitsPerLabelSet        []LimitsPerLabelSet `yaml:"limits_per_label_set" json:"limits_per_label_set" doc:"nocli|description=[Experimental] Enable limits per LabelSet. Supported limits per labelSet: [max_series]"`
+	MaxLocalSeriesPerUser        int                 `yaml:"max_series_per_user" json:"max_series_per_user"`
+	MaxLocalSeriesPerMetric      int                 `yaml:"max_series_per_metric" json:"max_series_per_metric"`
+	MaxGlobalSeriesPerUser       int                 `yaml:"max_global_series_per_user" json:"max_global_series_per_user"`
+	MaxGlobalSeriesPerMetric     int                 `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"`
+	LimitsPerLabelSet            []LimitsPerLabelSet `yaml:"limits_per_label_set" json:"limits_per_label_set" doc:"nocli|description=[Experimental] Enable limits per LabelSet. Supported limits per labelSet: [max_series]"`
+	EnableNativeHistogramPerUser bool                `yaml:"enable_native_histogram_per_user" json:"enable_native_histogram_per_user"`
 
 	// Metadata
 	MaxLocalMetricsWithMetadataPerUser int `yaml:"max_metadata_per_user" json:"max_metadata_per_user"`
```
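Because the new field lives in `validation.Limits`, it can be set per tenant through Cortex's runtime overrides file as well as globally. A minimal sketch of parsing such an overrides snippet — only the field name and yaml tag come from the diff above; the rest is illustrative:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// limits is a stand-in for validation.Limits holding just the new field;
// the yaml tag matches the struct tag added above.
type limits struct {
	EnableNativeHistogramPerUser bool `yaml:"enable_native_histogram_per_user"`
}

// The `overrides:` layout follows Cortex's runtime-config convention.
const runtimeConfig = `
overrides:
  tenant-a:
    enable_native_histogram_per_user: true
`

func main() {
	var cfg struct {
		Overrides map[string]*limits `yaml:"overrides"`
	}
	if err := yaml.Unmarshal([]byte(runtimeConfig), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Overrides["tenant-a"].EnableNativeHistogramPerUser) // true
	fmt.Println(cfg.Overrides["tenant-b"] == nil)                       // true: falls back to defaults
}
```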
The corresponding CLI flag registers the global default:

```diff
@@ -257,6 +258,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.")
 	f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.")
 	f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.")
+	f.BoolVar(&l.EnableNativeHistogramPerUser, "ingester.enable_native_histogram_per_user", false, "Flag to enable NativeHistograms per user.")
 	f.IntVar(&l.MaxExemplars, "ingester.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. less than zero means disabled. If the value is set to zero, cortex will fallback to blocks-storage.tsdb.max-exemplars value.")
 	f.Var(&l.OutOfOrderTimeWindow, "ingester.out-of-order-time-window", "[Experimental] Configures the allowed time window for ingestion of out-of-order samples. Disabled (0s) by default.")
```
And the `Overrides` accessor used by the ingester:

```diff
@@ -659,6 +661,11 @@ func (o *Overrides) MaxGlobalSeriesPerUser(userID string) int {
 	return o.GetOverridesForUser(userID).MaxGlobalSeriesPerUser
 }
 
+// EnableNativeHistogramPerUser returns whether the Ingester should accept NativeHistograms samples from this user.
+func (o *Overrides) EnableNativeHistogramPerUser(userID string) bool {
+	return o.GetOverridesForUser(userID).EnableNativeHistogramPerUser
+}
+
 // OutOfOrderTimeWindow returns the allowed time window for ingestion of out-of-order samples.
 func (o *Overrides) OutOfOrderTimeWindow(userID string) model.Duration {
 	return o.GetOverridesForUser(userID).OutOfOrderTimeWindow
```
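The accessor follows the standard `Overrides` pattern: resolve the tenant's `Limits` (per-tenant override if present, global defaults otherwise) and read one field. A minimal self-contained sketch of that resolution — the types and map here are stand-ins for Cortex's actual `TenantLimits` plumbing:

```go
package main

import "fmt"

type limits struct {
	EnableNativeHistogramPerUser bool
}

// overrides resolves per-tenant limits with a fallback to global defaults,
// mirroring what GetOverridesForUser does in the diff above.
type overrides struct {
	defaults     limits
	tenantLimits map[string]*limits
}

func (o *overrides) getOverridesForUser(userID string) *limits {
	if l, ok := o.tenantLimits[userID]; ok {
		return l
	}
	return &o.defaults
}

func (o *overrides) EnableNativeHistogramPerUser(userID string) bool {
	return o.getOverridesForUser(userID).EnableNativeHistogramPerUser
}

func main() {
	o := &overrides{
		defaults:     limits{EnableNativeHistogramPerUser: false},
		tenantLimits: map[string]*limits{"tenant-a": {EnableNativeHistogramPerUser: true}},
	}
	fmt.Println(o.EnableNativeHistogramPerUser("tenant-a")) // true
	fmt.Println(o.EnableNativeHistogramPerUser("tenant-b")) // false
}
```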
Review discussion on the discard behaviour in `Push`:

> I think we need to throw an exception here, similar to Prometheus. Wdyt?
> https://github.com/prometheus/prometheus/blob/main/tsdb/head_append.go#L647
> Try ingesting some native histogram samples into the current version of Cortex with the NH feature disabled and see what the behaviour is.
> I think the current Cortex behaviour is to count it as a discarded sample here, not to throw an error. But I'm still checking by running Cortex.
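For contrast, a sketch of the alternative raised in this thread: Prometheus's head appender rejects the sample with an error when native histogram ingestion is off (see the linked head_append.go), whereas the diff above silently counts it as discarded. The sentinel error below is defined locally to keep the sketch self-contained:

```go
package main

import (
	"errors"
	"fmt"
)

// errNativeHistogramsDisabled stands in for the error Prometheus's head
// appender returns when native histogram ingestion is disabled.
var errNativeHistogramsDisabled = errors.New("native histograms are disabled")

// appendHistogram sketches the "reject with an error" behaviour, as opposed
// to the "count as discarded and continue" behaviour in this PR.
func appendHistogram(nhEnabled bool) error {
	if !nhEnabled {
		return errNativeHistogramsDisabled
	}
	return nil
}

func main() {
	fmt.Println(appendHistogram(false)) // native histograms are disabled
	fmt.Println(appendHistogram(true))  // <nil>
}
```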