@@ -33,6 +33,7 @@ import (
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/wlog"
+	"github.com/prometheus/prometheus/util/zeropool"
 	"github.com/thanos-io/objstore"
 	"github.com/thanos-io/thanos/pkg/block/metadata"
 	"github.com/thanos-io/thanos/pkg/shipper"
@@ -95,6 +96,8 @@ const (
 var (
 	errExemplarRef      = errors.New("exemplars not ingested because series not already present")
 	errIngesterStopping = errors.New("ingester stopping")
+
+	tsChunksPool zeropool.Pool[[]client.TimeSeriesChunk]
 )
 
 // Config for an Ingester.
@@ -2055,7 +2058,8 @@ func (i *Ingester) queryStreamChunks(ctx context.Context, db *userTSDB, from, th
 		return 0, 0, 0, 0, ss.Err()
 	}
 
-	chunkSeries := make([]client.TimeSeriesChunk, 0, queryStreamBatchSize)
+	chunkSeries := getTimeSeriesChunksSlice()
+	defer putTimeSeriesChunksSlice(chunkSeries)
 	batchSizeBytes := 0
 	var it chunks.Iterator
 	for ss.Next() {
@@ -3072,6 +3076,31 @@ func (i *Ingester) ModeHandler(w http.ResponseWriter, r *http.Request) {
 	_, _ = w.Write([]byte(respMsg))
 }
 
+func (i *Ingester) getInstanceLimits() *InstanceLimits {
+	// Don't apply any limits while starting. In particular, we don't want to apply
+	// the series-in-memory limit while replaying the WAL.
+	if i.State() == services.Starting {
+		return nil
+	}
+
+	if i.cfg.InstanceLimitsFn == nil {
+		return defaultInstanceLimits
+	}
+
+	l := i.cfg.InstanceLimitsFn()
+	if l == nil {
+		return defaultInstanceLimits
+	}
+
+	return l
+}
+
+// stopIncomingRequests is called during the shutdown process.
+func (i *Ingester) stopIncomingRequests() {
+	i.stoppedMtx.Lock()
+	defer i.stoppedMtx.Unlock()
+	i.stopped = true
+}
+
 // metadataQueryRange returns the best range to query for metadata queries based on the timerange in the ingester.
 func metadataQueryRange(queryStart, queryEnd int64, db *userTSDB, queryIngestersWithin time.Duration) (mint, maxt int64, err error) {
 	if queryIngestersWithin > 0 {
@@ -3129,27 +3158,16 @@ func wrappedTSDBIngestExemplarErr(ingestErr error, timestamp model.Time, seriesL
 	)
 }
 
-func (i *Ingester) getInstanceLimits() *InstanceLimits {
-	// Don't apply any limits while starting. We especially don't want to apply series in memory limit while replaying WAL.
-	if i.State() == services.Starting {
-		return nil
-	}
-
-	if i.cfg.InstanceLimitsFn == nil {
-		return defaultInstanceLimits
+func getTimeSeriesChunksSlice() []client.TimeSeriesChunk {
+	if p := tsChunksPool.Get(); p != nil {
+		return p
 	}
 
-	l := i.cfg.InstanceLimitsFn()
-	if l == nil {
-		return defaultInstanceLimits
-	}
-
-	return l
+	return make([]client.TimeSeriesChunk, 0, queryStreamBatchSize)
 }
 
-// stopIncomingRequests is called during the shutdown process.
-func (i *Ingester) stopIncomingRequests() {
-	i.stoppedMtx.Lock()
-	defer i.stoppedMtx.Unlock()
-	i.stopped = true
+func putTimeSeriesChunksSlice(p []client.TimeSeriesChunk) {
+	if p != nil {
+		tsChunksPool.Put(p[:0])
+	}
 }
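
For context on the pattern above: queryStreamChunks previously allocated a fresh []client.TimeSeriesChunk on every call; with zeropool.Pool (a generic, type-safe wrapper over sync.Pool from prometheus/util/zeropool that avoids the interface-boxing allocation a plain sync.Pool of slices would incur), the slice's backing array is recycled across QueryStream calls. Below is a minimal standalone sketch of the same get/append/put round-trip; it assumes the zeropool.New constructor, and bufPool, batchSize, and the []int element type are illustrative stand-ins, not names from this change.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/util/zeropool"
)

const batchSize = 128 // hypothetical stand-in for queryStreamBatchSize

// A pool built with a constructor, so Get never returns nil here; the change
// above instead declares a zero-value Pool and nil-checks Get's result.
var bufPool = zeropool.New(func() []int { return make([]int, 0, batchSize) })

func main() {
	buf := bufPool.Get() // a recycled slice, or a fresh one from the constructor
	buf = append(buf, 1, 2, 3)
	fmt.Println(len(buf), cap(buf)) // 3 128

	// Return it truncated to length zero: the next Get sees an empty slice
	// but keeps the backing array, skipping the per-call allocation.
	bufPool.Put(buf[:0])
}

Putting back p[:0] rather than p is the detail that makes reuse safe in putTimeSeriesChunksSlice: the length resets so stale entries are never re-read, while the capacity (and backing array) survives for the next query.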