values.yaml
metricsGeneration:
  # -- Toggle generation of spanmetrics and servicegraph metrics.
  enabled: true
  # -- Use legacy metric names that match those used by the Tempo metrics generator.
  legacy: true
  # -- Additional dimensions to add to generated metrics.
  dimensions:
    - service.namespace
    - service.version
    - deployment.environment
    - k8s.cluster.name
    - k8s.pod.name
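  # Note: assuming these dimensions feed the spanmetrics connector, each entry is
  # resolved from the span's attributes (falling back to resource attributes) and
  # added as a label on the generated metrics.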
sampling:
  # -- Toggle tail sampling.
  enabled: true
  # -- Wait time since the first span of a trace before making a sampling decision.
  decisionWait: 15s
  successfulRequests:
    # -- Toggle sampling successful requests.
    sample: true
    # -- Percentage of successful requests to sample.
    percentage: 10
  failedRequests:
    # -- Toggle sampling failed requests.
    sample: false
    # -- Percentage of failed requests to sample.
    percentage: 50
  # -- User-defined policies in alloy format.
  # @default -- A policy to sample long requests is added by default.
  extraPolicies: |-
    policy {
      name = "sample-long-requests"
      type = "and"
      and {
        and_sub_policy {
          name = "latency"
          type = "latency"
          latency {
            threshold_ms = 5000
          }
        }
        and_sub_policy {
          name = "probabilistic-policy"
          type = "probabilistic"
          probabilistic {
            sampling_percentage = 50
          }
        }
      }
    }
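  # Further policies can be appended to `extraPolicies` using the same Alloy
  # tail-sampling syntax. A minimal sketch (illustrative only, not a chart default)
  # that keeps every trace containing an error span:
  #
  #   policy {
  #     name = "sample-error-traces"
  #     type = "status_code"
  #     status_code {
  #       status_codes = ["ERROR"]
  #     }
  #   }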
batch:
  # -- Configure batch processing options.
  deployment:
    timeout: 200ms
    send_batch_size: 8192
    send_batch_max_size: 0
  statefulset:
    timeout: 200ms
    send_batch_size: 8192
    send_batch_max_size: 0
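  # These options follow OpenTelemetry Collector batch processor semantics:
  # `timeout` flushes a batch after the given duration regardless of size,
  # `send_batch_size` triggers a send once that many spans are buffered, and
  # `send_batch_max_size: 0` leaves the batch size uncapped.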
deployment:
  otlp:
    # -- OTLP receiver settings for the deployment (load balancer).
    receiver:
      grpc:
        # -- gRPC max message receive size. Defaults to 4MB.
        max_recv_msg_size: 4MB
statefulset:
  otlp:
    # -- OTLP receiver settings for the statefulset (sampler).
    receiver:
      grpc:
        # -- gRPC max message receive size. Defaults to 4MB.
        max_recv_msg_size: 4MB
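# The deployment is the load-balancing tier: it receives OTLP traffic and forwards
# traces to the statefulset pods, which make the tail-sampling decisions. Routing is
# presumably keyed by trace ID so that all spans of a trace reach the same sampler pod.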
# @ignored Ignore alloy deployment
alloy-deployment:
  # -- Do not change this.
  nameOverride: deployment
  controller:
    type: deployment
    replicas: 1
    autoscaling:
      # -- Creates a HorizontalPodAutoscaler for controller type deployment.
      enabled: false
      # -- The lower limit for the number of replicas to which the autoscaler can scale down.
      minReplicas: 2
      # -- The upper limit for the number of replicas to which the autoscaler can scale up.
      maxReplicas: 5
      # -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
      targetCPUUtilizationPercentage: 0
      # -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
      targetMemoryUtilizationPercentage: 80
  alloy:
    # This chart creates the configmaps
    configMap:
      create: false
    resources:
      requests:
        cpu: "1"
        memory: "2G"
    extraPorts:
      - name: otlp-grpc
        port: 4317
        targetPort: 4317
        protocol: TCP
      - name: otlp-http
        port: 4318
        targetPort: 4318
        protocol: TCP
# @ignored Ignore alloy statefulset
alloy-statefulset:
  # -- Do not change this.
  nameOverride: statefulset
  controller:
    type: statefulset
    replicas: 1
    autoscaling:
      # -- Creates a HorizontalPodAutoscaler for controller type statefulset.
      enabled: false
      # -- The lower limit for the number of replicas to which the autoscaler can scale down.
      minReplicas: 2
      # -- The upper limit for the number of replicas to which the autoscaler can scale up.
      maxReplicas: 5
      # -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
      targetCPUUtilizationPercentage: 0
      # -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
      targetMemoryUtilizationPercentage: 80
  service:
    clusterIP: None
  alloy:
    extraEnv:
      - name: GRAFANA_CLOUD_API_KEY
        value: <REQUIRED>
      - name: GRAFANA_CLOUD_PROMETHEUS_URL
        value: <REQUIRED>
      - name: GRAFANA_CLOUD_PROMETHEUS_USERNAME
        value: <REQUIRED>
      - name: GRAFANA_CLOUD_TEMPO_ENDPOINT
        value: <REQUIRED>
      - name: GRAFANA_CLOUD_TEMPO_USERNAME
        value: <REQUIRED>
      # This is required for adaptive metric deduplication in Grafana Cloud.
      - name: POD_UID
        valueFrom:
          fieldRef:
            apiVersion: v1
            fieldPath: metadata.uid
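    # The <REQUIRED> values above must be supplied at install time. Helm replaces
    # list values rather than merging them, so an override of `extraEnv` should
    # repeat the whole list, including the POD_UID entry. For example (release
    # name and chart reference are illustrative):
    #   helm upgrade --install sampling grafana/grafana-sampling -f my-values.yaml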
    # This chart creates the configmaps
    configMap:
      create: false
    resources:
      requests:
        cpu: "1"
        memory: "2G"
    extraPorts:
      - name: otlp-grpc
        port: 4317
        targetPort: 4317
        protocol: TCP
  # The statefulset and deployment can share the same serviceAccount and rbac roles
  serviceAccount:
    create: false
  rbac:
    create: false