# values.yaml (forked from rudderlabs/rudderstack-helm)
# Default values for rudderstack.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Deployment-specific values for rudderstack.
# The following values must be filled in for the deployment to succeed.
# Please uncomment the lines below and fill in the values accordingly.
# Please enter the API token obtained from the Rudder dashboard below.
# rudderWorkspaceToken:
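## As an alternative to uncommenting rudderWorkspaceToken above, the token can be
## supplied at install time. A hypothetical sketch (Helm 3 syntax; the release name
## "rudderstack" and the chart path "." are assumptions, not taken from this file):
##
##   helm install rudderstack . --set rudderWorkspaceToken=<your-workspace-token>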
global:
  # backendReplicaCount decides the replica count for rudder backend and postgresql containers
  backendReplicaCount: 1
  imagePullSecrets: []
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner. (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  storageClass: ""
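  ## For example, to use a specific storage class (hypothetical value; pick one that
  ## exists in your cluster, e.g. check with `kubectl get storageclass`):
  # storageClass: "gp2"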
  affinity: {}

backend:
  probes: {}
  image:
    repository: rudderlabs/rudder-server
    version: "v0.1.9-rc.7"
    pullPolicy: Always
  controlPlaneJSON: false
  service:
    annotations:
      ## Refer to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer for more annotations
      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
      ## To enable HTTPS on AWS, uncomment the line below with your ACM-managed
      ## certificate ARN and change the port value below to 443.
      # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
    type: LoadBalancer
    port: 80
    targetPort: 8080
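    ## A hypothetical HTTPS variant of this service block, following the ACM note in
    ## the annotation comments above (the certificate ARN is a placeholder, not a
    ## real certificate):
    # annotations:
    #   service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
    #   service.beta.kubernetes.io/aws-load-balancer-ssl-cert: <your-acm-certificate-arn>
    # type: LoadBalancer
    # port: 443
    # targetPort: 8080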
  resources:
    requests:
      memory: 2560Mi
    limits:
      memory: 5120Mi
  nodeSelector: {}
  persistence:
    mountPath: /data/rudderstack
    accessModes:
      - ReadWriteOnce
    size: 36Gi
    annotations: {}
  tolerations: []
  config:
    mountPath: "/etc/rudderstack"
  extraEnvVars:
    - name: CONFIG_BACKEND_URL
      value: "https://api.rudderlabs.com"
    - name: CONFIG_PATH
      value: "/etc/rudderstack/config.toml"
    # These credentials are used for backing up successfully processed events
    - name: AWS_ACCESS_KEY_ID
      value: "Add your aws key id here"
    - name: AWS_SECRET_ACCESS_KEY
      value: "Add your aws access secret here"
    - name: BUGSNAG_KEY
      value: "3669852453c688bb50a0a2d27bf0ee58"
    - name: RUDDER_TMPDIR
      value: "/data/rudderstack"
    - name: JOBS_BACKUP_STORAGE_PROVIDER
      value: "S3" # object storage provider to store backups, e.g. S3, GCS, AZURE_BLOB, MINIO
    - name: JOBS_BACKUP_BUCKET
      value: "Add your bucket name to store backups of incoming events"
    - name: JOB_STATUS_BACKUP_BUCKET
      value: "Add your bucket name to store backups of status of incoming events"
    - name: GOOGLE_APPLICATION_CREDENTIALS
      value: "/etc/rudderstack/google-application-credentials.json"
    - name: LOG_LEVEL
      value: "INFO" # e.g. DEBUG, ERROR

transformer:
  replicaCount: 1
  service:
    port: 9090
  image:
    repository: rudderlabs/rudder-transformer
    version: "v0.1.5-rc.0"
    pullPolicy: Always
  resources:
    requests:
      memory: 256Mi
    limits:
      memory: 768Mi

postgresql:
  nameOverride: "rudderstack-postgresql"
  postgresqlUsername: rudder
  postgresqlPassword: password
  postgresqlDatabase: jobsdb
  postgresqlRunAsUser: 70
  imagePullPolicy: IfNotPresent
  image:
    repository: postgres
    tag: "11-alpine"
    pullPolicy: IfNotPresent
  persistence:
    size: 100Gi
  replication:
    enabled: false
  service:
    type: "ClusterIP"
    port: 5432
  resources:
    requests:
      memory: 2048Mi
    limits:
      memory: 4096Mi

telegraf_sidecar:
  enabled: true
  name: telegraf
  version: 1.5.0
  nameOverride: "rudderstack"
  image:
    repo: "telegraf"
    tag: "1.12-alpine"
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 128Mi
      cpu: 100m
    limits:
      memory: 128Mi
      cpu: 100m
  config:
    mountPath: /etc/telegraf
    agent:
      interval: "10s"
    outputs:
      - influxdb:
          urls: []
          # - "http://influxdb.monitoring.svc:8086"
          database: "telegraf"
    statsd_percentiles:
      - 50
      - 90
      - 95
      - 99
  affinity: {}