# helm repo add elastic https://helm.elastic.co
# helm upgrade --install elasticsearch elastic/elasticsearch -n elasticsearch -f ~/server/elasticsearch/values.yaml --version 8.5.1

---
clusterName: "elasticsearch"
nodeGroup: "master"

# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""

# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.roles=master
# https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles
roles:
  - master
  - data
  - data_content
  - data_hot
  - data_warm
  - data_cold
  - ingest
  - ml
  - remote_cluster_client
  - transform

replicas: 3
minimumMasterNodes: 2

esMajorVersion: ""

# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
esConfig:
  # SSL is disabled: internal-only cluster, & self-signed certs in mastodon are a pain
  elasticsearch.yml: |
    xpack.security.enabled: false
    xpack.security.http.ssl.enabled: false
    xpack.security.transport.ssl.enabled: false
#  key:
#    nestedkey: value
#  log4j2.properties: |
#    key = value

createCert: false

esJvmOptions: {}
#  processors.options: |
#    -XX:ActiveProcessorCount=3
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
#  - secretRef:
#      name: env-secret
#  - configMapRef:
#      name: config-map

# Disable it to use your own elastic-credential Secret.
secret:
  enabled: true
  password: "" # generated randomly if not defined

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs
#    defaultMode: 0755

hostAliases: []
#  - ip: "127.0.0.1"
#    hostnames:
#      - "foo.local"
#      - "bar.local"

image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "8.5.1"
imagePullPolicy: "IfNotPresent"

podAnnotations: {}
#  iam.amazonaws.com/role: es-cluster

# additional labels
labels: {}
esJavaOpts: "" # example: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "1000m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"

initResources: {}
#  limits:
#    cpu: "25m"
#    # memory: "128Mi"
#  requests:
#    cpu: "25m"
#    memory: "128Mi"

networkHost: "0.0.0.0"

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 30Gi
  storageClassName: "ceph-block"

rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
  automountToken: true

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir
persistence:
  enabled: true
  labels:
    # Add default labels for the volumeClaimTemplate of the StatefulSet
    enabled: false
  annotations: {}

extraVolumes: []
#  - name: extras
#    emptyDir: {}

extraVolumeMounts: []
#  - name: extras
#    mountPath: /usr/share/extras
#    readOnly: true

extraContainers: []
#  - name: do-something
#    image: busybox
#    command: ['do', 'something']

extraInitContainers: []
#  - name: do-something
#    image: busybox
#    command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true

protocol: http
httpPort: 9200
transportPort: 9300
service:
  enabled: true
  labels: {}
  labelsHeadless: {}
  type: ClusterIP
  # Consider that all endpoints are considered "ready" even if the Pods themselves are not
  # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
  publishNotReadyAddresses: false
  nodePort: ""
  annotations: {}
  httpPortName: http
  transportPortName: transport
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120

sysctlVmMaxMapCount: 262144

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

imagePullSecrets: []
nodeSelector: {}
tolerations: []
# Enabling this will publicly expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: false
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  className: "nginx"
  pathtype: ImplementationSpecific
  hosts:
    - host: chart-example.local
      paths:
        - path: /
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

nameOverride: ""
fullnameOverride: ""
healthNameOverride: ""

lifecycle: {}
#  preStop:
#    exec:
#      command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
#  postStart:
#    exec:
#      command:
#        - bash
#        - -c
#        - |
#          #!/bin/bash
#          # Add a template to adjust number of shards/replicas
#          TEMPLATE_NAME=my_template
#          INDEX_PATTERN="logstash-*"
#          SHARD_COUNT=8
#          REPLICA_COUNT=1
#          ES_URL=http://localhost:9200
#          while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
#          curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'

sysctlInitContainer:
  enabled: true

keystore: []
networkPolicy:
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access Elasticsearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## elasticsearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## elasticsearch-master-transport-client: "true"

  http:
    enabled: false
    ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace
    ## and matching all criteria can reach the DB.
    ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this
    ## parameter to select these namespaces
    ##
    # explicitNamespacesSelector:
    #   # Accept from namespaces with all those different rules (only from whitelisted Pods)
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}

    ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
    ##
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend

  transport:
    ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled.
    enabled: false
    # explicitNamespacesSelector:
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend

tests:
  enabled: true